From c9d2135504c4317faa08cfeb36ebf1e604d5f5c8 Mon Sep 17 00:00:00 2001 From: Filippo Valsorda Date: Sun, 3 Mar 2019 04:17:18 -0500 Subject: [PATCH] internal/radix51: rewrite FromBytes and AppendBytes with encoding/binary --- internal/radix51/fe.go | 111 ++++++++++-------------------------- internal/radix51/fe_test.go | 87 +++++++++++++++++++++------- 2 files changed, 97 insertions(+), 101 deletions(-) diff --git a/internal/radix51/fe.go b/internal/radix51/fe.go index ef06d66..f6a6522 100644 --- a/internal/radix51/fe.go +++ b/internal/radix51/fe.go @@ -12,6 +12,7 @@ package radix51 import ( "crypto/subtle" + "encoding/binary" "math/big" "math/bits" ) @@ -214,101 +215,51 @@ func (v *FieldElement) Set(a *FieldElement) *FieldElement { } // FromBytes sets v to x, which must be a 32 bytes little-endian encoding. +// +// Consistently with RFC 7748, the most significant bit (the high bit of the +// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1) +// are accepted. func (v *FieldElement) FromBytes(x []byte) *FieldElement { if len(x) != 32 { panic("ed25519: invalid field element input size") } - v[0] = uint64(x[0]) - v[0] |= uint64(x[1]) << 8 - v[0] |= uint64(x[2]) << 16 - v[0] |= uint64(x[3]) << 24 - v[0] |= uint64(x[4]) << 32 - v[0] |= uint64(x[5]) << 40 - v[0] |= uint64(x[6]&7) << 48 + // Provide headroom for the slight binary.LittleEndian.Uint64 overread. (We + // read 64 bits at an offset of 200, but then take only 4+51 into account.) 
+ var buf [33]byte + copy(buf[:], x) - v[1] = uint64(x[6]) >> 3 - v[1] |= uint64(x[7]) << 5 - v[1] |= uint64(x[8]) << 13 - v[1] |= uint64(x[9]) << 21 - v[1] |= uint64(x[10]) << 29 - v[1] |= uint64(x[11]) << 37 - v[1] |= uint64(x[12]&63) << 45 - - v[2] = uint64(x[12]) >> 6 - v[2] |= uint64(x[13]) << 2 - v[2] |= uint64(x[14]) << 10 - v[2] |= uint64(x[15]) << 18 - v[2] |= uint64(x[16]) << 26 - v[2] |= uint64(x[17]) << 34 - v[2] |= uint64(x[18]) << 42 - v[2] |= uint64(x[19]&1) << 50 - - v[3] = uint64(x[19]) >> 1 - v[3] |= uint64(x[20]) << 7 - v[3] |= uint64(x[21]) << 15 - v[3] |= uint64(x[22]) << 23 - v[3] |= uint64(x[23]) << 31 - v[3] |= uint64(x[24]) << 39 - v[3] |= uint64(x[25]&15) << 47 - - v[4] = uint64(x[25]) >> 4 - v[4] |= uint64(x[26]) << 4 - v[4] |= uint64(x[27]) << 12 - v[4] |= uint64(x[28]) << 20 - v[4] |= uint64(x[29]) << 28 - v[4] |= uint64(x[30]) << 36 - v[4] |= uint64(x[31]&127) << 44 + for i := range v { + bitsOffset := i * 51 + v[i] = binary.LittleEndian.Uint64(buf[bitsOffset/8:]) + v[i] >>= uint(bitsOffset % 8) + v[i] &= maskLow51Bits + } return v } // AppendBytes appends a 32 bytes little-endian encoding of v to b. 
func (v *FieldElement) AppendBytes(b []byte) []byte { - res, r := sliceForAppend(b, 32) t := new(FieldElement).Reduce(v) - r[0] = byte(t[0] & 0xff) - r[1] = byte((t[0] >> 8) & 0xff) - r[2] = byte((t[0] >> 16) & 0xff) - r[3] = byte((t[0] >> 24) & 0xff) - r[4] = byte((t[0] >> 32) & 0xff) - r[5] = byte((t[0] >> 40) & 0xff) - r[6] = byte((t[0] >> 48)) + res, out := sliceForAppend(b, 32) + for i := range out { + out[i] = 0 + } - r[6] ^= byte((t[1] << 3) & 0xf8) - r[7] = byte((t[1] >> 5) & 0xff) - r[8] = byte((t[1] >> 13) & 0xff) - r[9] = byte((t[1] >> 21) & 0xff) - r[10] = byte((t[1] >> 29) & 0xff) - r[11] = byte((t[1] >> 37) & 0xff) - r[12] = byte((t[1] >> 45)) - - r[12] ^= byte((t[2] << 6) & 0xc0) - r[13] = byte((t[2] >> 2) & 0xff) - r[14] = byte((t[2] >> 10) & 0xff) - r[15] = byte((t[2] >> 18) & 0xff) - r[16] = byte((t[2] >> 26) & 0xff) - r[17] = byte((t[2] >> 34) & 0xff) - r[18] = byte((t[2] >> 42) & 0xff) - r[19] = byte((t[2] >> 50)) - - r[19] ^= byte((t[3] << 1) & 0xfe) - r[20] = byte((t[3] >> 7) & 0xff) - r[21] = byte((t[3] >> 15) & 0xff) - r[22] = byte((t[3] >> 23) & 0xff) - r[23] = byte((t[3] >> 31) & 0xff) - r[24] = byte((t[3] >> 39) & 0xff) - r[25] = byte((t[3] >> 47)) - - r[25] ^= byte((t[4] << 4) & 0xf0) - r[26] = byte((t[4] >> 4) & 0xff) - r[27] = byte((t[4] >> 12) & 0xff) - r[28] = byte((t[4] >> 20) & 0xff) - r[29] = byte((t[4] >> 28) & 0xff) - r[30] = byte((t[4] >> 36) & 0xff) - r[31] = byte((t[4] >> 44)) + var buf [8]byte + for i := range t { + bitsOffset := i * 51 + binary.LittleEndian.PutUint64(buf[:], t[i]<<uint(bitsOffset%8)) + for i, b := range buf { + off := bitsOffset/8 + i + if off >= len(out) { + break + } + out[off] |= b + } + } return res } diff --git a/internal/radix51/fe_test.go b/internal/radix51/fe_test.go index 2366573..413cce7 100644 --- a/internal/radix51/fe_test.go +++ b/internal/radix51/fe_test.go @@ -1,4 +1,5 @@ // Copyright (c) 2017 George Tankersley. All rights reserved. +// Copyright (c) 2019 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,6 +9,7 @@ import ( "bytes" "crypto/rand" "io" + "math/big" mathrand "math/rand" "reflect" "testing" @@ -21,7 +23,8 @@ var quickCheckConfig = &quick.Config{MaxCountScale: 1 << 8} func generateFieldElement(rand *mathrand.Rand) FieldElement { // Generation strategy: generate random limb values bounded by // 2**(51+b), where b is a parameter controlling the bit-excess. - b := uint64(0) + // TODO: randomly decide to set the limbs to "weird" values. + b := uint64(0) // TODO: set this higher once we know the bounds. mask := (uint64(1) << (51 + b)) - 1 return FieldElement{ rand.Uint64() & mask, @@ -116,35 +119,77 @@ func BenchmarkWideMultCall(t *testing.B) { } } -func TestFeFromBytesRoundTrip(t *testing.T) { - var in, out [32]byte - var fe, r FieldElement +func TestFromBytesRoundTrip(t *testing.T) { + f1 := func(in, out [32]byte, fe FieldElement) bool { + fe.FromBytes(in[:]) + fe.AppendBytes(out[:0]) - in = [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + // Mask the most significant bit as it's ignored by FromBytes. (Now + // instead of earlier so we check the masking in FromBytes is working.) + in[len(in)-1] &= (1 << 7) - 1 - fe.FromBytes(in[:]) - fe.AppendBytes(out[:0]) + // TODO: values in the range [2^255-19, 2^255-1] will still fail the + // comparison as they will have been reduced in the round-trip, but the + // current quickcheck generation strategy will never hit them, which is + // not good. We should have a weird generator that aims for edge cases, + // and we'll know it works when this test breaks. 
- if !bytes.Equal(in[:], out[:]) { - t.Error("Bytes<>FE doesn't roundtrip") + return bytes.Equal(in[:], out[:]) + } + if err := quick.Check(f1, nil); err != nil { + t.Errorf("failed bytes->FE->bytes round-trip: %v", err) } - // Random field element - fe[0] = 0x4e645be9215a2 - fe[1] = 0x4e9654922df12 - fe[2] = 0x5829e468b0205 - fe[3] = 0x5e8fca9e0881c - fe[4] = 0x5c490f087d796 + f2 := func(fe, r FieldElement, out [32]byte) bool { + fe.AppendBytes(out[:0]) + r.FromBytes(out[:]) - fe.AppendBytes(out[:0]) - r.FromBytes(out[:]) + // Intentionally not using Equal not to go through AppendBytes again. + // Calling Reduce because both Generate and FromBytes can produce + // non-canonical representations. + fe.Reduce(&fe) + r.Reduce(&r) + return fe == r + } + if err := quick.Check(f2, nil); err != nil { + t.Errorf("failed FE->bytes->FE round-trip: %v", err) + } +} - for i := 0; i < len(fe); i++ { - if r[i] != fe[i] { - t.Error("FE<>Bytes doesn't roundtrip") +func swapEndianness(buf []byte) []byte { + for i := 0; i < len(buf)/2; i++ { + buf[i], buf[len(buf)-i-1] = buf[len(buf)-i-1], buf[i] + } + return buf +} + +func TestBytesBigEquivalence(t *testing.T) { + f1 := func(in, out [32]byte, fe, fe1 FieldElement) bool { + fe.FromBytes(in[:]) + + in[len(in)-1] &= (1 << 7) - 1 // mask the most significant bit + b := new(big.Int).SetBytes(swapEndianness(in[:])) + fe1.FromBig(b) + + if fe != fe1 { + return false } + + fe.AppendBytes(out[:0]) + buf := make([]byte, 32) // pad with zeroes + copy(buf, swapEndianness(fe1.ToBig().Bytes())) + + return bytes.Equal(out[:], buf) } + if err := quick.Check(f1, nil); err != nil { + t.Error(err) + } +} + +func TestFromBytesRoundTripEdgeCases(t *testing.T) { + // TODO: values close to 0, close to 2^255-19, between 2^255-19 and 2^255-1, + // and between 2^255 and 2^256-1. Test both the documented FromBytes + // behavior, and that AppendBytes reduces them. } // Tests self-consistency between FeMul and FeSquare.