Drop elliptic.Curve code, update import paths, and adapt to the new
FromBytes API. The tests in ed25519_test.go will require rescuing from
the git history.
This commit is contained in:
Filippo Valsorda 2019-03-30 22:22:42 -04:00
commit 8fdc84d186
13 changed files with 535 additions and 369 deletions

View File

@ -3,7 +3,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package group mplements group logic for the Ed25519 curve.
// Package group implements group logic for the Ed25519 curve.
package group
import (
@ -76,8 +76,7 @@ func (v *ExtendedGroupElement) Zero() *ExtendedGroupElement {
return v
}
var twoD = &radix51.FieldElement{1859910466990425, 932731440258426,
1072319116312658, 1815898335770999, 633789495995903}
var twoD = new(radix51.FieldElement).Add(D, D)
// This is the same addition formula everyone uses, "add-2008-hwcd-3".
// https://hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html#addition-add-2008-hwcd-3
@ -142,7 +141,7 @@ func (v *ExtendedGroupElement) Double(u *ExtendedGroupElement) *ExtendedGroupEle
C.Add(&C, &C) // TODO should probably implement FeSquare2
// D ← -1*A
D.Neg(&A) // implemented as substraction
D.Neg(&A) // implemented as subtraction
// E ← (X1+Y1)^2 A B
var t0 radix51.FieldElement
@ -172,7 +171,7 @@ type ProjectiveGroupElement struct {
func (v *ProjectiveGroupElement) FromAffine(x, y *big.Int) *ProjectiveGroupElement {
v.X.FromBig(x)
v.Y.FromBig(y)
v.Z.Zero()
v.Z.One()
return v
}
@ -227,7 +226,7 @@ func (v *ProjectiveGroupElement) Zero() *ProjectiveGroupElement {
func (v *ProjectiveGroupElement) DoubleZ1(u *ProjectiveGroupElement) *ProjectiveGroupElement {
var B, C, D, E, F radix51.FieldElement
if u.Z.Equal(radix51.Zero) != 1 {
if u.Z.Equal(radix51.One) != 1 {
panic("ed25519: DoubleZ1 called with Z != 1")
}

View File

@ -0,0 +1,129 @@
// Copyright (c) 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package radix51
import (
"testing"
"testing/quick"
)
// checkAliasingOneArg wraps a one-argument FieldElement method in a
// quick.Check property that verifies the method tolerates its argument
// aliasing its receiver, and that it leaves the argument unmodified.
func checkAliasingOneArg(f func(v, x *FieldElement) *FieldElement) func(v, x FieldElement) bool {
	return func(v, x FieldElement) bool {
		xOrig, aliased := x, x
		// Reference result, computed with distinct receiver and argument.
		if got := f(&v, &x); got != &v {
			return false
		}
		// The same call, but with the argument aliasing the receiver.
		if got := f(&aliased, &aliased); got != &aliased || aliased != v {
			return false
		}
		// The argument must come out of the reference call untouched.
		return x == xOrig
	}
}
// checkAliasingTwoArgs wraps a two-argument FieldElement method in a
// quick.Check property that verifies the method tolerates any combination of
// its arguments aliasing its receiver (and each other), and that it leaves
// the arguments unmodified.
func checkAliasingTwoArgs(f func(v, x, y *FieldElement) *FieldElement) func(v, x, y FieldElement) bool {
	return func(v, x, y FieldElement) bool {
		xOrig, yOrig := x, y

		// aliasOK seeds a fresh receiver with seed, invokes f on it through
		// apply, and reports whether the output landed in the receiver and
		// matches the reference value currently held in v.
		aliasOK := func(seed FieldElement, apply func(r *FieldElement) *FieldElement) bool {
			r := seed
			return apply(&r) == &r && r == v
		}

		// Reference f(x, y), computed without any aliasing.
		if f(&v, &x, &y) != &v {
			return false
		}
		// First argument aliasing the receiver.
		if !aliasOK(x, func(r *FieldElement) *FieldElement { return f(r, r, &y) }) {
			return false
		}
		// Second argument aliasing the receiver.
		if !aliasOK(y, func(r *FieldElement) *FieldElement { return f(r, &x, r) }) {
			return false
		}

		// Reference f(x, x), computed without any aliasing.
		if f(&v, &x, &x) != &v {
			return false
		}
		// First argument aliasing the receiver.
		if !aliasOK(x, func(r *FieldElement) *FieldElement { return f(r, r, &x) }) {
			return false
		}
		// Second argument aliasing the receiver.
		if !aliasOK(x, func(r *FieldElement) *FieldElement { return f(r, &x, r) }) {
			return false
		}
		// Both arguments aliasing the receiver.
		if !aliasOK(x, func(r *FieldElement) *FieldElement { return f(r, r, r) }) {
			return false
		}

		// The inputs must be left unmodified.
		return x == xOrig && y == yOrig
	}
}
// TestAliasing verifies that every FieldElement operation produces correct
// results when its arguments alias its receiver, and that it leaves its
// arguments unmodified.
func TestAliasing(t *testing.T) {
	type target struct {
		name     string
		oneArgF  func(v, x *FieldElement) *FieldElement
		twoArgsF func(v, x, y *FieldElement) *FieldElement
	}
	targets := []target{
		{name: "Abs", oneArgF: (*FieldElement).Abs},
		{name: "Invert", oneArgF: (*FieldElement).Invert},
		{name: "Neg", oneArgF: (*FieldElement).Neg},
		{name: "reduce", oneArgF: (*FieldElement).reduce},
		{name: "Set", oneArgF: (*FieldElement).Set},
		{name: "Square", oneArgF: (*FieldElement).Square},
		{
			name: "CondNeg0",
			oneArgF: func(v, x *FieldElement) *FieldElement {
				return v.CondNeg(x, 0)
			},
		},
		{
			name: "CondNeg1",
			oneArgF: func(v, x *FieldElement) *FieldElement {
				return v.CondNeg(x, 1)
			},
		},
		{name: "Mul", twoArgsF: (*FieldElement).Mul},
		{name: "Add", twoArgsF: (*FieldElement).Add},
		{name: "Sub", twoArgsF: (*FieldElement).Sub},
		{
			name: "Select0",
			twoArgsF: func(v, x, y *FieldElement) *FieldElement {
				return v.Select(x, y, 0)
			},
		},
		{
			name: "Select1",
			twoArgsF: func(v, x, y *FieldElement) *FieldElement {
				return v.Select(x, y, 1)
			},
		},
	}

	config := &quick.Config{MaxCountScale: 1 << 8}
	for _, tt := range targets {
		var err error
		if tt.oneArgF != nil {
			err = quick.Check(checkAliasingOneArg(tt.oneArgF), config)
		} else if tt.twoArgsF != nil {
			err = quick.Check(checkAliasingTwoArgs(tt.twoArgsF), config)
		}
		if err != nil {
			t.Errorf("%v: %v", tt.name, err)
		}
	}
}

View File

@ -3,25 +3,28 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Field arithmetic in radix 2^51 representation. This code is a port of the
// public domain amd64-51-30k version of ed25519 from SUPERCOP.
// GF(2^255-19) field arithmetic in radix 2^51 representation. This code is a
// port of the public domain amd64-51-30k version of ed25519 from SUPERCOP.
//
// The interface works similarly to math/big.Int, and all arguments and
// receivers are allowed to alias.
package radix51
import (
"crypto/subtle"
"encoding/binary"
"math/big"
"math/bits"
)
// FieldElement represents an element of the field GF(2^255-19). An element t
// represents the integer t[0] + t[1]*2^51 + t[2]*2^102 + t[3]*2^153 +
// t[4]*2^204. The zero value is a valid zero element.
// t[4]*2^204. Each limb must not exceed 2^54 - 1 to be valid.
//
// The zero value is a valid zero element.
type FieldElement [5]uint64
const (
// The vaule 2^51-1, used in carry propagation
maskLow51Bits = uint64(1)<<51 - 1
)
const maskLow51Bits uint64 = (1 << 51) - 1
var (
Zero = &FieldElement{0, 0, 0, 0, 0}
@ -30,60 +33,55 @@ var (
MinusOne = new(FieldElement).Neg(One)
)
// Zero sets v = 0 and returns v.
func (v *FieldElement) Zero() *FieldElement {
v[0] = 0
v[1] = 0
v[2] = 0
v[3] = 0
v[4] = 0
*v = *Zero
return v
}
// One sets v = 1 and returns v.
func (v *FieldElement) One() *FieldElement {
v[0] = 1
v[1] = 0
v[2] = 0
v[3] = 0
v[4] = 0
*v = *One
return v
}
// SetInt sets the receiving FieldElement to the specified small integer.
func (v *FieldElement) SetInt(x uint64) *FieldElement {
v[0] = x
v[1] = 0
v[2] = 0
v[3] = 0
v[4] = 0
return v
}
func (v *FieldElement) Reduce(u *FieldElement) *FieldElement {
v.Set(u)
// Let v = v[0] + v[1]*2^51 + v[2]*2^102 + v[3]*2^153 + v[4]*2^204
// Reduce each limb below 2^51, propagating carries.
// lightReduce brings the limbs below 52, 51, 51, 51, 51 bits. It is split in
// two because the inliner works actively against us. The two functions MUST be
// called one after the other: lightReduce1 propagates the carries from limb 0
// up through limb 2, and lightReduce2 finishes limbs 2 through 4, folding the
// final carry back into limb 0 (using 2^255 = 19 mod 2^255-19).
func (v *FieldElement) lightReduce1() *FieldElement {
	v[1] += v[0] >> 51          // carry limb 0 into limb 1
	v[0] = v[0] & maskLow51Bits // keep the low 51 bits of limb 0
	v[2] += v[1] >> 51          // carry limb 1 into limb 2
	v[1] = v[1] & maskLow51Bits
	v[3] += v[2] >> 51 // carry limb 2 into limb 3; limb 2 is masked by lightReduce2
	return v
}
// lightReduce2 is the second half of the carry chain started by lightReduce1
// (see the comment there): it masks limb 2, carries limb 3 into limb 4, and
// reduces the carry out of limb 4 back into limb 0.
func (v *FieldElement) lightReduce2() *FieldElement {
	v[2] = v[2] & maskLow51Bits
	v[4] += v[3] >> 51 // carry limb 3 into limb 4
	v[3] = v[3] & maskLow51Bits
	v[0] += (v[4] >> 51) * 19 // 2^255 = 19 mod p, so the top carry re-enters at the bottom
	v[4] = v[4] & maskLow51Bits
	return v
}
// We now have a field element v < 2^255, but need v < 2^255-19
// TODO Document why this works. It's the elaborate comment about r = h-pq etc etc.
// reduce reduces v modulo 2^255 - 19 and returns it.
func (v *FieldElement) reduce(u *FieldElement) *FieldElement {
v.Set(u).lightReduce1().lightReduce2()
// Get the carry bit
// After the light reduction we now have a field element representation
// v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
// If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1,
// generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise.
c := (v[0] + 19) >> 51
c = (v[1] + c) >> 51
c = (v[2] + c) >> 51
c = (v[3] + c) >> 51
c = (v[4] + c) >> 51
// If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
// effectively applying the reduction identity to the carry.
v[0] += 19 * c
v[1] += v[0] >> 51
@ -100,52 +98,38 @@ func (v *FieldElement) Reduce(u *FieldElement) *FieldElement {
return v
}
// Add sets v = a + b. Long sequences of additions without reduction that
// let coefficients grow larger than 54 bits would be a problem. Paper
// cautions: "do not have such sequences of additions".
// Add sets v = a + b and returns v.
//
// Long sequences of additions without reduction that let coefficients grow
// larger than 54 bits would be a problem. Paper cautions: "do not have such
// sequences of additions".
func (v *FieldElement) Add(a, b *FieldElement) *FieldElement {
v[0] = a[0] + b[0]
v[1] = a[1] + b[1]
v[2] = a[2] + b[2]
v[3] = a[3] + b[3]
v[4] = a[4] + b[4]
return v
return v.lightReduce1().lightReduce2()
}
// Sub sets v = a - b.
// Sub sets v = a - b and returns v.
func (v *FieldElement) Sub(a, b *FieldElement) *FieldElement {
t := *b
// Reduce each limb below 2^51, propagating carries. Ensures that results
// fit within the limbs. This would not be required for reduced input.
t[1] += t[0] >> 51
t[0] = t[0] & maskLow51Bits
t[2] += t[1] >> 51
t[1] = t[1] & maskLow51Bits
t[3] += t[2] >> 51
t[2] = t[2] & maskLow51Bits
t[4] += t[3] >> 51
t[3] = t[3] & maskLow51Bits
t[0] += (t[4] >> 51) * 19
t[4] = t[4] & maskLow51Bits
// This is slightly more complicated. Because we use unsigned coefficients, we
// first add a multiple of p and then subtract.
v[0] = (a[0] + 0xFFFFFFFFFFFDA) - t[0]
v[1] = (a[1] + 0xFFFFFFFFFFFFE) - t[1]
v[2] = (a[2] + 0xFFFFFFFFFFFFE) - t[2]
v[3] = (a[3] + 0xFFFFFFFFFFFFE) - t[3]
v[4] = (a[4] + 0xFFFFFFFFFFFFE) - t[4]
return v
// We first add 2 * p, to guarantee the subtraction won't underflow, and
// then subtract b (which can be up to 2^255 + 2^13 * 19).
v[0] = (a[0] + 0xFFFFFFFFFFFDA) - b[0]
v[1] = (a[1] + 0xFFFFFFFFFFFFE) - b[1]
v[2] = (a[2] + 0xFFFFFFFFFFFFE) - b[2]
v[3] = (a[3] + 0xFFFFFFFFFFFFE) - b[3]
v[4] = (a[4] + 0xFFFFFFFFFFFFE) - b[4]
return v.lightReduce1().lightReduce2()
}
// Neg sets v = -a.
// Neg sets v = -a and returns v.
func (v *FieldElement) Neg(a *FieldElement) *FieldElement {
return v.Sub(Zero, a)
}
// Invert sets v = 1/z mod p by calculating z^(p-2), p-2 = 2^255 - 21.
// Invert sets v = 1/z mod p and returns v.
func (v *FieldElement) Invert(z *FieldElement) *FieldElement {
// Inversion is implemented as exponentiation with exponent p 2. It uses the
// same sequence of 255 squarings and 11 multiplications as [Curve25519].
@ -210,159 +194,127 @@ func (v *FieldElement) Invert(z *FieldElement) *FieldElement {
return v.Mul(&t, &z11) // 2^255 - 21
}
// Set sets v = a and returns v.
func (v *FieldElement) Set(a *FieldElement) *FieldElement {
*v = *a
return v
}
func (v *FieldElement) FromBytes(x *[32]byte) *FieldElement {
v[0] = uint64(x[0])
v[0] |= uint64(x[1]) << 8
v[0] |= uint64(x[2]) << 16
v[0] |= uint64(x[3]) << 24
v[0] |= uint64(x[4]) << 32
v[0] |= uint64(x[5]) << 40
v[0] |= uint64(x[6]&7) << 48
// FromBytes sets v to x, which must be a 32 bytes little-endian encoding.
//
// Consistently with RFC 7748, the most significant bit (the high bit of the
// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
// are accepted.
func (v *FieldElement) FromBytes(x []byte) *FieldElement {
if len(x) != 32 {
panic("ed25519: invalid field element input size")
}
v[1] = uint64(x[6]) >> 3
v[1] |= uint64(x[7]) << 5
v[1] |= uint64(x[8]) << 13
v[1] |= uint64(x[9]) << 21
v[1] |= uint64(x[10]) << 29
v[1] |= uint64(x[11]) << 37
v[1] |= uint64(x[12]&63) << 45
// Provide headroom for the slight binary.LittleEndian.Uint64 overread. (We
// read 64 bits at an offset of 200, but then take only 4+51 into account.)
var buf [33]byte
copy(buf[:], x)
v[2] = uint64(x[12]) >> 6
v[2] |= uint64(x[13]) << 2
v[2] |= uint64(x[14]) << 10
v[2] |= uint64(x[15]) << 18
v[2] |= uint64(x[16]) << 26
v[2] |= uint64(x[17]) << 34
v[2] |= uint64(x[18]) << 42
v[2] |= uint64(x[19]&1) << 50
v[3] = uint64(x[19]) >> 1
v[3] |= uint64(x[20]) << 7
v[3] |= uint64(x[21]) << 15
v[3] |= uint64(x[22]) << 23
v[3] |= uint64(x[23]) << 31
v[3] |= uint64(x[24]) << 39
v[3] |= uint64(x[25]&15) << 47
v[4] = uint64(x[25]) >> 4
v[4] |= uint64(x[26]) << 4
v[4] |= uint64(x[27]) << 12
v[4] |= uint64(x[28]) << 20
v[4] |= uint64(x[29]) << 28
v[4] |= uint64(x[30]) << 36
v[4] |= uint64(x[31]&127) << 44
for i := range v {
bitsOffset := i * 51
v[i] = binary.LittleEndian.Uint64(buf[bitsOffset/8:])
v[i] >>= uint(bitsOffset % 8)
v[i] &= maskLow51Bits
}
return v
}
func (v *FieldElement) ToBytes(r *[32]byte) {
t := new(FieldElement).Reduce(v)
// Bytes appends a 32 bytes little-endian encoding of v to b.
func (v *FieldElement) Bytes(b []byte) []byte {
t := new(FieldElement).reduce(v)
r[0] = byte(t[0] & 0xff)
r[1] = byte((t[0] >> 8) & 0xff)
r[2] = byte((t[0] >> 16) & 0xff)
r[3] = byte((t[0] >> 24) & 0xff)
r[4] = byte((t[0] >> 32) & 0xff)
r[5] = byte((t[0] >> 40) & 0xff)
r[6] = byte((t[0] >> 48))
res, out := sliceForAppend(b, 32)
for i := range out {
out[i] = 0
}
r[6] ^= byte((t[1] << 3) & 0xf8)
r[7] = byte((t[1] >> 5) & 0xff)
r[8] = byte((t[1] >> 13) & 0xff)
r[9] = byte((t[1] >> 21) & 0xff)
r[10] = byte((t[1] >> 29) & 0xff)
r[11] = byte((t[1] >> 37) & 0xff)
r[12] = byte((t[1] >> 45))
r[12] ^= byte((t[2] << 6) & 0xc0)
r[13] = byte((t[2] >> 2) & 0xff)
r[14] = byte((t[2] >> 10) & 0xff)
r[15] = byte((t[2] >> 18) & 0xff)
r[16] = byte((t[2] >> 26) & 0xff)
r[17] = byte((t[2] >> 34) & 0xff)
r[18] = byte((t[2] >> 42) & 0xff)
r[19] = byte((t[2] >> 50))
r[19] ^= byte((t[3] << 1) & 0xfe)
r[20] = byte((t[3] >> 7) & 0xff)
r[21] = byte((t[3] >> 15) & 0xff)
r[22] = byte((t[3] >> 23) & 0xff)
r[23] = byte((t[3] >> 31) & 0xff)
r[24] = byte((t[3] >> 39) & 0xff)
r[25] = byte((t[3] >> 47))
r[25] ^= byte((t[4] << 4) & 0xf0)
r[26] = byte((t[4] >> 4) & 0xff)
r[27] = byte((t[4] >> 12) & 0xff)
r[28] = byte((t[4] >> 20) & 0xff)
r[29] = byte((t[4] >> 28) & 0xff)
r[30] = byte((t[4] >> 36) & 0xff)
r[31] = byte((t[4] >> 44))
}
func (v *FieldElement) FromBig(num *big.Int) *FieldElement {
var buf [32]byte
offset := 0
words := num.Bits()
numWords := len(words)
for n := 0; n < numWords; n++ {
word := words[n]
for i := 0; i < bits.UintSize/8; i++ {
if offset >= len(buf) {
var buf [8]byte
for i := range t {
bitsOffset := i * 51
binary.LittleEndian.PutUint64(buf[:], t[i]<<uint(bitsOffset%8))
for i, b := range buf {
off := bitsOffset/8 + i
if off >= len(out) {
break
}
buf[offset] = byte(word >> uint((i << 3)))
offset++
out[off] |= b
}
}
return v.FromBytes(&buf)
return res
}
// sliceForAppend extends the input slice by n bytes. head is the full extended
// slice, while tail is the appended part. If the original slice has sufficient
// capacity no allocation is performed.
func sliceForAppend(in []byte, n int) (head, tail []byte) {
	total := len(in) + n
	if cap(in) >= total {
		// Reuse the existing backing array.
		head = in[:total]
	} else {
		grown := make([]byte, total)
		copy(grown, in)
		head = grown
	}
	tail = head[len(in):]
	return head, tail
}
// FromBig sets v = n and returns v. The bit length of n must not exceed 256.
//
// NOTE(review): only the absolute value is used — big.Int.Bits ignores the
// sign — so callers are presumably expected to pass non-negative values.
func (v *FieldElement) FromBig(n *big.Int) *FieldElement {
	if n.BitLen() > 32*8 {
		panic("ed25519: invalid field element input size")
	}
	// Serialize the native big.Word limbs of n into a 32-byte little-endian
	// buffer, least significant word first, then let FromBytes split the
	// value into radix-2^51 limbs.
	buf := make([]byte, 0, 32)
	for _, word := range n.Bits() {
		for i := 0; i < bits.UintSize; i += 8 {
			if len(buf) >= cap(buf) {
				break
			}
			buf = append(buf, byte(word))
			word >>= 8
		}
	}
	// Reslicing up to the full capacity picks up the zero bytes make left in
	// the backing array, padding short values to the 32 bytes FromBytes needs.
	return v.FromBytes(buf[:32])
}
// ToBig returns v as a big.Int.
func (v *FieldElement) ToBig() *big.Int {
var buf [32]byte
v.ToBytes(&buf) // does a reduction
buf := v.Bytes(nil)
numWords := 256 / bits.UintSize
words := make([]big.Word, numWords)
offset := 0
byteSize := uint(bits.UintSize >> 3)
for n := 0; n < numWords; n++ {
word := uint(0)
for i := uint(0); i < byteSize; i++ {
if offset >= len(buf) {
words := make([]big.Word, 32*8/bits.UintSize)
for n := range words {
for i := 0; i < bits.UintSize; i += 8 {
if len(buf) == 0 {
break
}
word |= uint(buf[offset]) << (i << 3)
offset++
words[n] |= big.Word(buf[0]) << big.Word(i)
buf = buf[1:]
}
words[n] = big.Word(word)
}
out := new(big.Int)
return out.SetBits(words)
return new(big.Int).SetBits(words)
}
// Equal returns 1 if v and u are equal, and 0 otherwise.
func (v *FieldElement) Equal(u *FieldElement) int {
var sa, sv [32]byte
u.ToBytes(&sa)
v.ToBytes(&sv)
u.Bytes(sa[:0])
v.Bytes(sv[:0])
return subtle.ConstantTimeCompare(sa[:], sv[:])
}
const mask64Bits uint64 = (1 << 64) - 1
// Select sets v to a if cond == 1, and to b if cond == 0.
// v, a and b are allowed to overlap.
func (v *FieldElement) Select(a, b *FieldElement, cond int) *FieldElement {
m := uint64(cond) * 0xffffffffffffffff
m := uint64(cond) * mask64Bits
v[0] = (m & a[0]) | (^m & b[0])
v[1] = (m & a[1]) | (^m & b[1])
v[2] = (m & a[2]) | (^m & b[2])
@ -373,17 +325,18 @@ func (v *FieldElement) Select(a, b *FieldElement, cond int) *FieldElement {
// CondNeg sets v to -u if cond == 1, and to u if cond == 0.
func (v *FieldElement) CondNeg(u *FieldElement, cond int) *FieldElement {
return v.Select(v.Neg(u), u, cond)
tmp := new(FieldElement).Neg(u)
return v.Select(tmp, u, cond)
}
// IsNegative returns 1 if v is negative, and 0 otherwise.
func (v *FieldElement) IsNegative() int {
var b [32]byte
v.ToBytes(&b)
v.Bytes(b[:0])
return int(b[0] & 1)
}
// Abs sets v to |u| and returns v. v and u are allowed to overlap.
func (v *FieldElement) Abs(u *FieldElement) *FieldElement {
	return v.CondNeg(u, u.IsNegative())
}

View File

@ -6,22 +6,19 @@
package radix51
// Mul sets out = x * y.
// Mul sets v = x * y and returns v.
func (v *FieldElement) Mul(x, y *FieldElement) *FieldElement {
var x0, x1, x2, x3, x4 uint64
var y0, y1, y2, y3, y4 uint64
x0 := x[0]
x1 := x[1]
x2 := x[2]
x3 := x[3]
x4 := x[4]
x0 = x[0]
x1 = x[1]
x2 = x[2]
x3 = x[3]
x4 = x[4]
y0 = y[0]
y1 = y[1]
y2 = y[2]
y3 = y[3]
y4 = y[4]
y0 := y[0]
y1 := y[1]
y2 := y[2]
y3 := y[3]
y4 := y[4]
// Reduction can be carried out simultaneously to multiplication. For
// example, we do not compute a coefficient r_5 . Whenever the result of a
@ -34,39 +31,39 @@ func (v *FieldElement) Mul(x, y *FieldElement) *FieldElement {
x4_19 := x4 * 19
// calculate r0 = x0*y0 + 19*(x1*y4 + x2*y3 + x3*y2 + x4*y1)
r00, r01 := mul64x64(0, 0, x0, y0)
r00, r01 = mul64x64(r00, r01, x1_19, y4)
r00, r01 = mul64x64(r00, r01, x2_19, y3)
r00, r01 = mul64x64(r00, r01, x3_19, y2)
r00, r01 = mul64x64(r00, r01, x4_19, y1)
r00, r01 := madd64(0, 0, x0, y0)
r00, r01 = madd64(r00, r01, x1_19, y4)
r00, r01 = madd64(r00, r01, x2_19, y3)
r00, r01 = madd64(r00, r01, x3_19, y2)
r00, r01 = madd64(r00, r01, x4_19, y1)
// calculate r1 = x0*y1 + x1*y0 + 19*(x2*y4 + x3*y3 + x4*y2)
r10, r11 := mul64x64(0, 0, x0, y1)
r10, r11 = mul64x64(r10, r11, x1, y0)
r10, r11 = mul64x64(r10, r11, x2_19, y4)
r10, r11 = mul64x64(r10, r11, x3_19, y3)
r10, r11 = mul64x64(r10, r11, x4_19, y2)
r10, r11 := madd64(0, 0, x0, y1)
r10, r11 = madd64(r10, r11, x1, y0)
r10, r11 = madd64(r10, r11, x2_19, y4)
r10, r11 = madd64(r10, r11, x3_19, y3)
r10, r11 = madd64(r10, r11, x4_19, y2)
// calculate r2 = x0*y2 + x1*y1 + x2*y0 + 19*(x3*y4 + x4*y3)
r20, r21 := mul64x64(0, 0, x0, y2)
r20, r21 = mul64x64(r20, r21, x1, y1)
r20, r21 = mul64x64(r20, r21, x2, y0)
r20, r21 = mul64x64(r20, r21, x3_19, y4)
r20, r21 = mul64x64(r20, r21, x4_19, y3)
r20, r21 := madd64(0, 0, x0, y2)
r20, r21 = madd64(r20, r21, x1, y1)
r20, r21 = madd64(r20, r21, x2, y0)
r20, r21 = madd64(r20, r21, x3_19, y4)
r20, r21 = madd64(r20, r21, x4_19, y3)
// calculate r3 = x0*y3 + x1*y2 + x2*y1 + x3*y0 + 19*x4*y4
r30, r31 := mul64x64(0, 0, x0, y3)
r30, r31 = mul64x64(r30, r31, x1, y2)
r30, r31 = mul64x64(r30, r31, x2, y1)
r30, r31 = mul64x64(r30, r31, x3, y0)
r30, r31 = mul64x64(r30, r31, x4_19, y4)
r30, r31 := madd64(0, 0, x0, y3)
r30, r31 = madd64(r30, r31, x1, y2)
r30, r31 = madd64(r30, r31, x2, y1)
r30, r31 = madd64(r30, r31, x3, y0)
r30, r31 = madd64(r30, r31, x4_19, y4)
// calculate r4 = x0*y4 + x1*y3 + x2*y2 + x3*y1 + x4*y0
r40, r41 := mul64x64(0, 0, x0, y4)
r40, r41 = mul64x64(r40, r41, x1, y3)
r40, r41 = mul64x64(r40, r41, x2, y2)
r40, r41 = mul64x64(r40, r41, x3, y1)
r40, r41 = mul64x64(r40, r41, x4, y0)
r40, r41 := madd64(0, 0, x0, y4)
r40, r41 = madd64(r40, r41, x1, y3)
r40, r41 = madd64(r40, r41, x2, y2)
r40, r41 = madd64(r40, r41, x3, y1)
r40, r41 = madd64(r40, r41, x4, y0)
// After the multiplication we need to reduce (carry) the 5 coefficients to
// obtain a result with coefficients that are at most slightly larger than
@ -106,22 +103,6 @@ func (v *FieldElement) Mul(x, y *FieldElement) *FieldElement {
// r_0 to r_1 , from r_1 to r_2 , from r_2 to r_3 , from r_3 to r_4 , and
// finally from r_4 to r_0 . Each of these carries is done as one copy, one
// right shift by 51, one logical and with 2^51 1, and one addition.
r10 += r00 >> 51
r00 &= maskLow51Bits
r20 += r10 >> 51
r10 &= maskLow51Bits
r30 += r20 >> 51
r20 &= maskLow51Bits
r40 += r30 >> 51
r30 &= maskLow51Bits
r00 += (r40 >> 51) * 19
r40 &= maskLow51Bits
v[0] = r00
v[1] = r10
v[2] = r20
v[3] = r30
v[4] = r40
return v
*v = FieldElement{r00, r10, r20, r30, r40}
return v.lightReduce1().lightReduce2()
}

View File

@ -6,11 +6,11 @@
package radix51
// Mul sets out = x * y.
// Mul sets v = x * y and returns v.
func (v *FieldElement) Mul(x, y *FieldElement) *FieldElement {
feMul(v, x, y)
return v
}
// go:noescape
//go:noescape
func feMul(out, a, b *FieldElement)

View File

@ -7,11 +7,11 @@
// +build amd64,!noasm
// func feMul(outp *uint64, xp *uint64, yp *uint64)
// func feMul(out, a, b *FieldElement)
TEXT ·feMul(SB),$0-24
MOVQ outp+0(FP), DI
MOVQ xp+8(FP), BX
MOVQ yp+16(FP), CX
MOVQ out+0(FP), DI
MOVQ a+8(FP), BX
MOVQ b+16(FP), CX
// Calculate r0
MOVQ 0(BX), AX // rax <-- x0

View File

@ -6,19 +6,17 @@
package radix51
// Square sets v = x * x.
// Square sets v = x * x and returns v.
func (v *FieldElement) Square(x *FieldElement) *FieldElement {
// Squaring needs only 15 mul instructions. Some inputs are multiplied by 2;
// this is combined with multiplication by 19 where possible. The coefficient
// reduction after squaring is the same as for multiplication.
var x0, x1, x2, x3, x4 uint64
x0 = x[0]
x1 = x[1]
x2 = x[2]
x3 = x[3]
x4 = x[4]
x0 := x[0]
x1 := x[1]
x2 := x[2]
x3 := x[3]
x4 := x[4]
x0_2 := x0 << 1
x1_2 := x1 << 1
@ -31,29 +29,29 @@ func (v *FieldElement) Square(x *FieldElement) *FieldElement {
x4_19 := x4 * 19
// r0 = x0*x0 + x1*38*x4 + x2*38*x3
r00, r01 := mul64x64(0, 0, x0, x0)
r00, r01 = mul64x64(r00, r01, x1_38, x4)
r00, r01 = mul64x64(r00, r01, x2_38, x3)
r00, r01 := madd64(0, 0, x0, x0)
r00, r01 = madd64(r00, r01, x1_38, x4)
r00, r01 = madd64(r00, r01, x2_38, x3)
// r1 = x0*2*x1 + x2*38*x4 + x3*19*x3
r10, r11 := mul64x64(0, 0, x0_2, x1)
r10, r11 = mul64x64(r10, r11, x2_38, x4)
r10, r11 = mul64x64(r10, r11, x3_19, x3)
r10, r11 := madd64(0, 0, x0_2, x1)
r10, r11 = madd64(r10, r11, x2_38, x4)
r10, r11 = madd64(r10, r11, x3_19, x3)
// r2 = x0*2*x2 + x1*x1 + x3*38*x4
r20, r21 := mul64x64(0, 0, x0_2, x2)
r20, r21 = mul64x64(r20, r21, x1, x1)
r20, r21 = mul64x64(r20, r21, x3_38, x4)
r20, r21 := madd64(0, 0, x0_2, x2)
r20, r21 = madd64(r20, r21, x1, x1)
r20, r21 = madd64(r20, r21, x3_38, x4)
// r3 = x0*2*x3 + x1*2*x2 + x4*19*x4
r30, r31 := mul64x64(0, 0, x0_2, x3)
r30, r31 = mul64x64(r30, r31, x1_2, x2)
r30, r31 = mul64x64(r30, r31, x4_19, x4)
r30, r31 := madd64(0, 0, x0_2, x3)
r30, r31 = madd64(r30, r31, x1_2, x2)
r30, r31 = madd64(r30, r31, x4_19, x4)
// r4 = x0*2*x4 + x1*2*x3 + x2*x2
r40, r41 := mul64x64(0, 0, x0_2, x4)
r40, r41 = mul64x64(r40, r41, x1_2, x3)
r40, r41 = mul64x64(r40, r41, x2, x2)
r40, r41 := madd64(0, 0, x0_2, x4)
r40, r41 = madd64(r40, r41, x1_2, x3)
r40, r41 = madd64(r40, r41, x2, x2)
// Same reduction
@ -79,21 +77,6 @@ func (v *FieldElement) Square(x *FieldElement) *FieldElement {
r41 *= 19
r00 += r41
r10 += r00 >> 51
r00 &= maskLow51Bits
r20 += r10 >> 51
r10 &= maskLow51Bits
r30 += r20 >> 51
r20 &= maskLow51Bits
r40 += r30 >> 51
r30 &= maskLow51Bits
r00 += (r40 >> 51) * 19
r40 &= maskLow51Bits
v[0] = r00
v[1] = r10
v[2] = r20
v[3] = r30
v[4] = r40
return v
*v = FieldElement{r00, r10, r20, r30, r40}
return v.lightReduce1().lightReduce2()
}

View File

@ -6,11 +6,11 @@
package radix51
// Square sets v = x * x.
// Square sets v = x * x and returns v.
func (v *FieldElement) Square(x *FieldElement) *FieldElement {
feSquare(v, x)
return v
}
// go:noescape
//go:noescape
func feSquare(out, x *FieldElement)

View File

@ -4,10 +4,10 @@
// +build amd64,!noasm
// func feSquare(outp *uint64, xp *uint64)
// func feSquare(out, x *FieldElement)
TEXT ·feSquare(SB),4,$0-16
MOVQ outp+0(FP), DI
MOVQ xp+8(FP), SI
MOVQ out+0(FP), DI
MOVQ x+8(FP), SI
// r0 = x0*x0 + x1*38*x4 + x2*38*x3
MOVQ 0(SI), AX

View File

@ -1,4 +1,5 @@
// Copyright (c) 2017 George Tankersley. All rights reserved.
// Copyright (c) 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -8,95 +9,190 @@ import (
"bytes"
"crypto/rand"
"io"
"math/big"
mathrand "math/rand"
"reflect"
"testing"
"unsafe"
"testing/quick"
)
// quickCheckConfig will make each quickcheck test run (1024 * -quickchecks)
// times. The default value of -quickchecks is 100.
var quickCheckConfig = &quick.Config{MaxCountScale: 1 << 10}
// generateFieldElement returns a FieldElement with each limb drawn uniformly
// from [0, 2^(51+b)), for use by the testing/quick generators below.
func generateFieldElement(rand *mathrand.Rand) FieldElement {
	// Generation strategy: generate random limb values bounded by
	// 2**(51+b), where b is a parameter controlling the bit-excess.
	// TODO: randomly decide to set the limbs to "weird" values.
	const b = 0 // TODO: set this higher once we know the bounds.
	mask := uint64(1)<<(51+b) - 1
	var fe FieldElement
	for i := range fe {
		fe[i] = rand.Uint64() & mask
	}
	return fe
}
// Generate produces a random FieldElement for testing/quick, satisfying the
// quick.Generator interface. The size hint is ignored; limb magnitude is
// controlled by generateFieldElement instead.
func (x FieldElement) Generate(rand *mathrand.Rand, size int) reflect.Value {
	return reflect.ValueOf(generateFieldElement(rand))
}
// TestMulDistributesOverAdd checks the field identity (x+y)*z == x*z + y*z
// on quick-generated field elements.
func TestMulDistributesOverAdd(t *testing.T) {
	prop := func(x, y, z FieldElement) bool {
		// lhs = (x + y) * z
		var lhs FieldElement
		lhs.Add(&x, &y)
		lhs.Mul(&lhs, &z)

		// rhs = x*z + y*z
		var xz, yz, rhs FieldElement
		xz.Mul(&x, &z)
		yz.Mul(&y, &z)
		rhs.Add(&xz, &yz)

		return lhs.Equal(&rhs) == 1
	}
	if err := quick.Check(prop, quickCheckConfig); err != nil {
		t.Error(err)
	}
}
func TestMul64to128(t *testing.T) {
a := uint64(5)
b := uint64(5)
r0, r1 := mul64x64(0, 0, a, b)
r0, r1 := madd64(0, 0, a, b)
if r0 != 0x19 || r1 != 0 {
t.Errorf("lo-range wide mult failed, got %d + %d*(2**64)", r0, r1)
}
a = uint64(18014398509481983) // 2^54 - 1
b = uint64(18014398509481983) // 2^54 - 1
r0, r1 = mul64x64(0, 0, a, b)
r0, r1 = madd64(0, 0, a, b)
if r0 != 0xff80000000000001 || r1 != 0xfffffffffff {
t.Errorf("hi-range wide mult failed, got %d + %d*(2**64)", r0, r1)
}
a = uint64(1125899906842661)
b = uint64(2097155)
r0, r1 = mul64x64(0, 0, a, b)
r0, r1 = mul64x64(r0, r1, a, b)
r0, r1 = mul64x64(r0, r1, a, b)
r0, r1 = mul64x64(r0, r1, a, b)
r0, r1 = mul64x64(r0, r1, a, b)
r0, r1 = madd64(0, 0, a, b)
r0, r1 = madd64(r0, r1, a, b)
r0, r1 = madd64(r0, r1, a, b)
r0, r1 = madd64(r0, r1, a, b)
r0, r1 = madd64(r0, r1, a, b)
if r0 != 16888498990613035 || r1 != 640 {
t.Errorf("wrong answer: %d + %d*(2**64)", r0, r1)
}
}
// BenchmarkWideMultInline measures the manually inlined 32x32-partial-product
// form of the 128-bit multiply-accumulate (the same instruction sequence as
// the pre-go1.12 madd64 fallback), for comparison with the function call.
func BenchmarkWideMultInline(t *testing.B) {
	var r0, r1, ol, oh uint64
	a := uint64(18014398509481983) // 2^54 - 1
	b := uint64(18014398509481983) // 2^54 - 1

	for i := 0; i < t.N; i++ {
		// Cross products plus carries out of the low 64 bits.
		t1 := (a>>32)*(b&0xFFFFFFFF) + ((a & 0xFFFFFFFF) * (b & 0xFFFFFFFF) >> 32)
		t2 := (a&0xFFFFFFFF)*(b>>32) + (t1 & 0xFFFFFFFF)
		ol = (a * b) + r0
		cmp := ol < r0 // true exactly when the low-word addition wrapped
		oh = r1 + (a>>32)*(b>>32) + t1>>32 + t2>>32 + uint64(*(*byte)(unsafe.Pointer(&cmp)))

		// Feed the result back in so each iteration depends on the last.
		r1 = oh
		r0 = ol
	}
}
func BenchmarkWideMultCall(t *testing.B) {
var r0, r1 uint64
a := uint64(18014398509481983)
b := uint64(18014398509481983)
for i := 0; i < t.N; i++ {
r0, r1 = mul64x64(r0, r1, a, b)
r0, r1 = madd64(r0, r1, a, b)
}
}
func TestFeFromBytesRoundTrip(t *testing.T) {
var in, out [32]byte
var fe, r FieldElement
func TestFromBytesRoundTrip(t *testing.T) {
f1 := func(in, out [32]byte, fe FieldElement) bool {
fe.FromBytes(in[:])
fe.Bytes(out[:0])
in = [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
// Mask the most significant bit as it's ignored by FromBytes. (Now
// instead of earlier so we check the masking in FromBytes is working.)
in[len(in)-1] &= (1 << 7) - 1
fe.FromBytes(&in)
fe.ToBytes(&out)
// TODO: values in the range [2^255-19, 2^255-1] will still fail the
// comparison as they will have been reduced in the round-trip, but the
// current quickcheck generation strategy will never hit them, which is
// not good. We should have a weird generator that aims for edge cases,
// and we'll know it works when this test breaks.
if !bytes.Equal(in[:], out[:]) {
t.Error("Bytes<>FE doesn't roundtrip")
return bytes.Equal(in[:], out[:])
}
if err := quick.Check(f1, nil); err != nil {
t.Errorf("failed bytes->FE->bytes round-trip: %v", err)
}
// Random field element
fe[0] = 0x4e645be9215a2
fe[1] = 0x4e9654922df12
fe[2] = 0x5829e468b0205
fe[3] = 0x5e8fca9e0881c
fe[4] = 0x5c490f087d796
f2 := func(fe, r FieldElement, out [32]byte) bool {
fe.Bytes(out[:0])
r.FromBytes(out[:])
fe.ToBytes(&out)
r.FromBytes(&out)
// Intentionally not using Equal not to go through Bytes again.
// Calling reduce because both Generate and FromBytes can produce
// non-canonical representations.
fe.reduce(&fe)
r.reduce(&r)
return fe == r
}
if err := quick.Check(f2, nil); err != nil {
t.Errorf("failed FE->bytes->FE round-trip: %v", err)
}
for i := 0; i < len(fe); i++ {
if r[i] != fe[i] {
t.Error("FE<>Bytes doesn't roundtrip")
// Check some fixed vectors from dalek
type feRTTest struct {
fe FieldElement
b []byte
}
var tests = []feRTTest{
{
fe: FieldElement([5]uint64{358744748052810, 1691584618240980, 977650209285361, 1429865912637724, 560044844278676}),
b: []byte{74, 209, 69, 197, 70, 70, 161, 222, 56, 226, 229, 19, 112, 60, 25, 92, 187, 74, 222, 56, 50, 153, 51, 233, 40, 74, 57, 6, 160, 185, 213, 31},
},
{
fe: FieldElement([5]uint64{84926274344903, 473620666599931, 365590438845504, 1028470286882429, 2146499180330972}),
b: []byte{199, 23, 106, 112, 61, 77, 216, 79, 186, 60, 11, 118, 13, 16, 103, 15, 42, 32, 83, 250, 44, 57, 204, 198, 78, 199, 253, 119, 146, 172, 3, 122},
},
}
for _, tt := range tests {
if !bytes.Equal(tt.fe.Bytes(nil), tt.b) || new(FieldElement).FromBytes(tt.b).Equal(&tt.fe) != 1 {
t.Errorf("Failed fixed roundtrip: %v", tt)
}
}
}
// swapEndianness reverses buf in place and returns it, converting between the
// little-endian and big-endian encodings of the same value.
func swapEndianness(buf []byte) []byte {
	for l, r := 0, len(buf)-1; l < r; l, r = l+1, r-1 {
		buf[l], buf[r] = buf[r], buf[l]
	}
	return buf
}
// TestBytesBigEquivalence checks that the FromBytes/Bytes byte encoding and
// the FromBig/ToBig big.Int conversions produce equivalent values.
func TestBytesBigEquivalence(t *testing.T) {
	f1 := func(in, out [32]byte, fe, fe1 FieldElement) bool {
		fe.FromBytes(in[:])

		// Mask the top bit only after FromBytes has run, since FromBytes is
		// documented to ignore it; big.Int needs the masked value explicitly.
		in[len(in)-1] &= (1 << 7) - 1 // mask the most significant bit
		// big.Int.SetBytes takes big-endian input, so flip the encoding.
		b := new(big.Int).SetBytes(swapEndianness(in[:]))
		fe1.FromBig(b)

		if fe != fe1 {
			return false
		}

		fe.Bytes(out[:0])
		// big.Int.Bytes drops leading zeros, so copy into a zeroed 32-byte
		// buffer to match the fixed-width encoding.
		buf := make([]byte, 32) // pad with zeroes
		copy(buf, swapEndianness(fe1.ToBig().Bytes()))

		return bytes.Equal(out[:], buf)
	}
	if err := quick.Check(f1, nil); err != nil {
		t.Error(err)
	}
}
// TestFromBytesRoundTripEdgeCases is a placeholder for round-trip tests of
// boundary values that the random generators are unlikely to hit.
func TestFromBytesRoundTripEdgeCases(t *testing.T) {
	// TODO: values close to 0, close to 2^255-19, between 2^255-19 and 2^255-1,
	// and between 2^255 and 2^256-1. Test both the documented FromBytes
	// behavior, and that Bytes reduces them.
}
// Tests self-consistency between FeMul and FeSquare.
func TestSanity(t *testing.T) {
var x FieldElement
@ -113,7 +209,7 @@ func TestSanity(t *testing.T) {
// t.Fatalf("all ones failed\nmul.s: %d\nmul.g: %d\nsqr.s: %d\nsqr.g: %d\n", x2, x2Go, x2sq, x2sqGo)
// }
if !vartimeEqual(x2, x2sq) {
if x2 != x2sq {
t.Fatalf("all ones failed\nmul: %x\nsqr: %x\n", x2, x2sq)
}
@ -123,7 +219,7 @@ func TestSanity(t *testing.T) {
if err != nil {
t.Fatal(err)
}
x.FromBytes(&bytes)
x.FromBytes(bytes[:])
x2.Mul(&x, &x)
// FeMulGo(&x2Go, &x, &x)
@ -134,21 +230,12 @@ func TestSanity(t *testing.T) {
// t.Fatalf("random field element failed\nfe: %x\n\nmul.s: %x\nmul.g: %x\nsqr.s: %x\nsqr.g: %x\n", x, x2, x2Go, x2sq, x2sqGo)
// }
if !vartimeEqual(x2, x2sq) {
if x2 != x2sq {
t.Fatalf("all ones failed\nmul: %x\nsqr: %x\n", x2, x2sq)
}
}
func vartimeEqual(x, y FieldElement) bool {
for i := 0; i < 5; i++ {
if x[i] != y[i] {
return false
}
}
return true
}
func TestFeEqual(t *testing.T) {
func TestEqual(t *testing.T) {
var x FieldElement = [5]uint64{1, 1, 1, 1, 1}
var y FieldElement = [5]uint64{5, 4, 3, 2, 1}
@ -163,16 +250,16 @@ func TestFeEqual(t *testing.T) {
}
}
func TestFeInvert(t *testing.T) {
func TestInvert(t *testing.T) {
var x FieldElement = [5]uint64{1, 1, 1, 1, 1}
var one FieldElement = [5]uint64{1, 0, 0, 0, 0}
var xinv, r FieldElement
xinv.Invert(&x)
r.Mul(&x, &xinv)
r.Reduce(&r)
r.reduce(&r)
if !vartimeEqual(one, r) {
if one != r {
t.Errorf("inversion identity failed, got: %x", r)
}
@ -182,13 +269,13 @@ func TestFeInvert(t *testing.T) {
if err != nil {
t.Fatal(err)
}
x.FromBytes(&bytes)
x.FromBytes(bytes[:])
xinv.Invert(&x)
r.Mul(&x, &xinv)
r.Reduce(&r)
r.reduce(&r)
if !vartimeEqual(one, r) {
if one != r {
t.Errorf("random inversion identity failed, got: %x for field element %x", r, x)
}
}

View File

@ -0,0 +1,18 @@
// Copyright (c) 2019 George Tankersley. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.12
package radix51
import "math/bits"
// madd64 multiplies a and b and adds the 128-bit product to the split
// accumulator (lo, hi), returning the updated low and high halves.
func madd64(lo, hi, a, b uint64) (ol uint64, oh uint64) {
	prodHi, prodLo := bits.Mul64(a, b)
	sumLo, carry := bits.Add64(prodLo, lo, 0)
	sumHi, _ := bits.Add64(prodHi, hi, carry)
	return sumLo, sumHi
}

View File

@ -0,0 +1,19 @@
// Copyright (c) 2017 George Tankersley. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.12
package radix51
import "unsafe"
// madd64 multiplies two 64-bit numbers and adds them to a split 128-bit accumulator.
//
// This fallback is used before Go 1.12, where bits.Mul64/bits.Add64 are not
// available; the 128-bit product is assembled from 32x32->64 partial products.
func madd64(lo, hi, a, b uint64) (ol uint64, oh uint64) {
	// t1 = ah*bl plus the carry out of the low 32 bits of the result
	// (the high half of al*bl).
	t1 := (a>>32)*(b&0xFFFFFFFF) + ((a & 0xFFFFFFFF) * (b & 0xFFFFFFFF) >> 32)
	// t2 = al*bh plus the low half of t1; its high bits are a further
	// carry into the top 64 bits.
	t2 := (a&0xFFFFFFFF)*(b>>32) + (t1 & 0xFFFFFFFF)
	// Low word: the (wrapping) 64-bit product plus the accumulator low word.
	ol = (a * b) + lo
	// cmp is true exactly when the addition above wrapped around.
	cmp := ol < lo
	// High word: accumulator high word, ah*bh, both cross-product carries,
	// and the low-word carry. The unsafe conversion reads the bool as a
	// 0/1 byte to add the carry without a branch.
	oh = hi + (a>>32)*(b>>32) + t1>>32 + t2>>32 + uint64(*(*byte)(unsafe.Pointer(&cmp)))
	return
}

View File

@ -54,16 +54,13 @@ func (e *Element) FromUniformBytes(b []byte) {
panic("ristretto255: FromUniformBytes: input is not 64 bytes long")
}
var buf [32]byte
f := &radix51.FieldElement{}
copy(buf[:], b[:32])
f.FromBytes(&buf)
f.FromBytes(b[:32])
p1 := &group.ExtendedGroupElement{}
mapToPoint(p1, f)
copy(buf[:], b[32:])
f.FromBytes(&buf)
f.FromBytes(b[32:])
p2 := &group.ExtendedGroupElement{}
mapToPoint(p2, f)