-
Notifications
You must be signed in to change notification settings - Fork 227
Expand file tree
/
Copy pathvector.go
More file actions
444 lines (396 loc) · 12.6 KB
/
vector.go
File metadata and controls
444 lines (396 loc) · 12.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
// Copyright 2020-2025 Consensys Software Inc.
// Licensed under the Apache License, Version 2.0. See the LICENSE file for details.
// Code generated by consensys/gnark-crypto DO NOT EDIT
package fp
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math/bits"
"runtime"
"slices"
"strings"
"sync"
"sync/atomic"
"unsafe"
)
// Vector represents a slice of Element.
//
// It implements the following interfaces:
//   - Stringer
//   - io.WriterTo
//   - io.ReaderFrom
//   - encoding.BinaryMarshaler
//   - encoding.BinaryUnmarshaler
//   - sort.Interface
type Vector []Element
// MarshalBinary implements encoding.BinaryMarshaler. It serializes the vector
// using the same encoding as [Vector.WriteTo]: a 4-byte big-endian length
// header followed by the big-endian bytes of each element.
func (vector *Vector) MarshalBinary() (data []byte, err error) {
	var b bytes.Buffer
	_, err = vector.WriteTo(&b)
	if err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler. It decodes data
// produced by [Vector.MarshalBinary] by delegating to [Vector.ReadFrom].
func (vector *Vector) UnmarshalBinary(data []byte) error {
	_, err := vector.ReadFrom(bytes.NewReader(data))
	return err
}
// WriteTo implements io.WriterTo and writes a vector of big endian encoded Element.
// Length of the vector is encoded as a uint32 on the first 4 bytes.
func (vector *Vector) WriteTo(w io.Writer) (int64, error) {
	// the length header is a big-endian uint32
	if err := binary.Write(w, binary.BigEndian, uint32(len(*vector))); err != nil {
		return 0, err
	}
	written := int64(4) // header already written
	var scratch [Bytes]byte
	for _, e := range *vector {
		BigEndian.PutElement(&scratch, e)
		n, err := w.Write(scratch[:])
		written += int64(n)
		if err != nil {
			return written, err
		}
	}
	return written, nil
}
// AsyncReadFrom implements an asynchronous version of [Vector.ReadFrom]. It
// reads the reader r in full and then performs the validation and conversion to
// Montgomery form separately in a goroutine. Any error encountered during
// reading is returned directly, while errors encountered during
// validation/conversion are sent on the returned channel. Thus the caller must
// wait on the channel to ensure the vector is ready to use. The method
// additionally returns the number of bytes read from r.
//
// The reader can contain more bytes than needed to decode the vector, in which
// case the extra bytes are ignored. In that case the reader is not seeked nor
// read further.
//
// The method allocates sufficiently large slice to store the vector. If the
// current slice fits the vector, it is reused, otherwise the slice is grown to
// fit the vector.
//
// The serialized encoding is as follows:
//   - first 4 bytes: length of the vector as a big-endian uint32
//   - for each element of the vector, [Bytes] bytes representing the element in
//     big-endian encoding.
func (vector *Vector) AsyncReadFrom(r io.Reader) (int64, error, chan error) { // nolint ST1008
	// buffered (capacity 1) so the validation goroutine can report its error
	// without blocking even if the caller has not started receiving yet.
	chErr := make(chan error, 1)
	var buf [Bytes]byte
	if read, err := io.ReadFull(r, buf[:4]); err != nil {
		close(chErr)
		return int64(read), err, chErr
	}
	headerSliceLen := uint64(binary.BigEndian.Uint32(buf[:4]))
	// to avoid allocating too large slice when the header is tampered, we limit
	// the maximum allocation. We set the target to 4GB. This incurs a performance
	// hit when reading very large slices, but protects against OOM.
	targetSize := uint64(1 << 32) // 4GB
	if bits.UintSize == 32 {
		// reduce target size to 1GB on 32 bits architectures
		targetSize = uint64(1 << 30) // 1GB
	}
	maxAllocateSliceLength := targetSize / uint64(Bytes)
	totalRead := int64(4) // the 4-byte length header has already been consumed
	*vector = (*vector)[:0]
	if headerSliceLen == 0 {
		// if the vector was nil previously even by reslicing we have a nil vector.
		// but we want to have an empty slice to indicate that the vector has zero length.
		if *vector == nil {
			*vector = []Element{}
		}
		// we return already here to avoid launching a goroutine doing nothing below
		close(chErr)
		return totalRead, nil, chErr
	}
	// Read the raw bytes directly into the element slice's backing array, in
	// chunks of at most maxAllocateSliceLength elements: the slice is grown
	// incrementally so a tampered length header cannot force one huge allocation.
	for i := uint64(0); i < headerSliceLen; i += maxAllocateSliceLength {
		if len(*vector) <= int(i) {
			(*vector) = append(*vector, make([]Element, int(min(headerSliceLen-i, maxAllocateSliceLength)))...)
		}
		// flat byte view of the current chunk of elements to read into
		bSlice := unsafe.Slice((*byte)(unsafe.Pointer(&(*vector)[i])), int(min(headerSliceLen-i, maxAllocateSliceLength))*Bytes)
		read, err := io.ReadFull(r, bSlice)
		totalRead += int64(read)
		if errors.Is(err, io.ErrUnexpectedEOF) {
			// io.ReadFull signals a short (but non-empty) read with ErrUnexpectedEOF
			close(chErr)
			return totalRead, fmt.Errorf("less data than expected: read %d elements, expected %d", i+uint64(read)/Bytes, headerSliceLen), chErr
		}
		if err != nil {
			close(chErr)
			return totalRead, err, chErr
		}
	}
	// byte view over the entire vector, captured by the goroutine below
	bSlice := unsafe.Slice((*byte)(unsafe.Pointer(&(*vector)[0])), int(headerSliceLen)*Bytes)
	go func() {
		var cptErrors uint64
		// process the elements in parallel
		execute(int(headerSliceLen), func(start, end int) {
			var z Element
			for i := start; i < end; i++ {
				// we have to set vector[i]
				bstart := i * Bytes
				bend := bstart + Bytes
				b := bSlice[bstart:bend]
				// decode big-endian bytes into limbs: z[0] is the
				// least-significant limb, taken from the last 8 bytes.
				z[0] = binary.BigEndian.Uint64(b[40:48])
				z[1] = binary.BigEndian.Uint64(b[32:40])
				z[2] = binary.BigEndian.Uint64(b[24:32])
				z[3] = binary.BigEndian.Uint64(b[16:24])
				z[4] = binary.BigEndian.Uint64(b[8:16])
				z[5] = binary.BigEndian.Uint64(b[0:8])
				if !z.smallerThanModulus() {
					// count invalid elements (atomically, workers run concurrently)
					// and stop processing this range.
					atomic.AddUint64(&cptErrors, 1)
					return
				}
				z.toMont()
				(*vector)[i] = z
			}
		})
		if cptErrors > 0 {
			chErr <- fmt.Errorf("async read: %d elements failed validation", cptErrors)
		}
		close(chErr)
	}()
	return totalRead, nil, chErr
}
// ReadFrom reads the vector from the reader r. It returns the number of bytes
// read and an error, if any. The errors can be:
//   - an error while reading from r (including [io.EOF] i.e. not enough bytes in r);
//   - when decoding the bytes into elements.
//
// The reader can contain more bytes than needed to decode the vector, in which case
// the extra bytes are ignored. In that case the reader is not seeked nor read further.
//
// The method allocates sufficiently large slice to store the vector. If the current slice fits
// the vector, it is reused, otherwise the slice is grown to fit the vector.
//
// The serialized encoding is as follows:
//   - first 4 bytes: length of the vector as a big-endian uint32
//   - for each element of the vector, [Bytes] bytes representing the element in big-endian encoding.
//
// The method implements [io.ReaderFrom] interface.
func (vector *Vector) ReadFrom(r io.Reader) (int64, error) {
	var buf [Bytes]byte
	if read, err := io.ReadFull(r, buf[:4]); err != nil {
		return int64(read), err
	}
	headerSliceLen := uint64(binary.BigEndian.Uint32(buf[:4]))
	// to avoid allocating too large slice when the header is tampered, we limit
	// the maximum allocation. We set the target to 4GB. This incurs a performance
	// hit when reading very large slices, but protects against OOM.
	targetSize := uint64(1 << 32) // 4GB
	if bits.UintSize == 32 {
		// reduce target size to 1GB on 32 bits architectures
		targetSize = uint64(1 << 30) // 1GB
	}
	maxAllocateSliceLength := targetSize / uint64(Bytes)
	totalRead := int64(4) // include already the header length
	*vector = (*vector)[:0]
	// if the vector was nil previously even by reslicing we have a nil vector. But we want
	// to have an empty slice to indicate that the vector has zero length. When headerSliceLen == 0
	// we handle this edge case after reading the header as the loop body below is skipped.
	if headerSliceLen == 0 && *vector == nil {
		*vector = []Element{}
	}
	for i := uint64(0); i < headerSliceLen; i++ {
		read, err := io.ReadFull(r, buf[:])
		totalRead += int64(read)
		// io.ReadFull returns io.EOF when no bytes were read and
		// io.ErrUnexpectedEOF on a partial read; both mean the stream is
		// truncated, so report the same diagnostic (consistent with
		// [Vector.AsyncReadFrom]).
		if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
			return totalRead, fmt.Errorf("less data than expected: read %d elements, expected %d", i, headerSliceLen)
		}
		if err != nil {
			return totalRead, fmt.Errorf("error reading element %d: %w", i, err)
		}
		// grow capacity in bounded steps so a tampered header cannot force a
		// single huge allocation.
		if uint64(cap(*vector)) <= i {
			(*vector) = slices.Grow(*vector, int(min(headerSliceLen-i, maxAllocateSliceLength)))
		}
		el, err := BigEndian.Element(&buf)
		if err != nil {
			return totalRead, fmt.Errorf("error decoding element %d: %w", i, err)
		}
		*vector = append(*vector, el)
	}
	return totalRead, nil
}
// String implements fmt.Stringer interface: the vector is rendered as
// "[e0,e1,...,eN]" using each element's String representation.
func (vector Vector) String() string {
	var sb strings.Builder
	sb.WriteByte('[')
	for i := range vector {
		if i > 0 {
			sb.WriteByte(',')
		}
		sb.WriteString(vector[i].String())
	}
	sb.WriteByte(']')
	return sb.String()
}
// Len is the number of elements in the collection (part of sort.Interface).
func (vector Vector) Len() int {
	return len(vector)
}
// Less reports whether the element with index i should sort before the
// element with index j (part of sort.Interface).
func (vector Vector) Less(i, j int) bool {
	rhs := vector[j]
	return vector[i].Cmp(&rhs) == -1
}
// Swap swaps the elements with indexes i and j (part of sort.Interface).
func (vector Vector) Swap(i, j int) {
	tmp := vector[i]
	vector[i] = vector[j]
	vector[j] = tmp
}
// SetRandom sets the elements in vector to independent uniform random values in [0, q).
//
// This might error only if reading from crypto/rand.Reader errors,
// in which case the values in vector are undefined.
func (vector Vector) SetRandom() error {
	for i := 0; i < len(vector); i++ {
		_, err := vector[i].SetRandom()
		if err != nil {
			return err
		}
	}
	return nil
}
// Exp sets vector[i] = a[i]ᵏ for all i.
//
// k == 0 sets every destination element to one; k < 0 raises the batch
// inverse of a to the power -k. vector and a must have the same length
// (the method panics otherwise); they may alias the same slice.
func (vector Vector) Exp(a Vector, k int64) {
	N := len(a)
	if N != len(vector) {
		panic("vector.Exp: vectors don't have the same length")
	}
	if k == 0 {
		// x⁰ = 1 for every element, including 0⁰ by this convention
		for i := range vector {
			vector[i].SetOne()
		}
		return
	}
	base := a
	exp := k
	if k < 0 {
		// call batch inverse
		base = BatchInvert(a)
		exp = -k // if k == math.MinInt64, -k overflows, but uint64(-k) is correct
	} else if N > 0 {
		// ensure that vector and a are not the same slice; else we need to copy a into base
		// (the square-and-multiply loop below overwrites vector in place, which
		// would clobber the base operand if they alias)
		v0 := &vector[0] // #nosec G602 we check that N > 0 above
		a0 := &a[0]      // #nosec G602 we check that N > 0 above
		if v0 == a0 {
			base = make(Vector, N)
			copy(base, a)
		}
	}
	// left-to-right square-and-multiply, applied element-wise on the
	// whole vector: start from base (the top bit of exp is implicitly 1).
	copy(vector, base)
	// Use bits.Len64 to iterate only over significant bits
	for i := bits.Len64(uint64(exp)) - 2; i >= 0; i-- {
		vector.Mul(vector, vector)
		if (uint64(exp)>>uint(i))&1 != 0 {
			vector.Mul(vector, base)
		}
	}
}
// MustSetRandom sets the elements in vector to independent uniform random values in [0, q).
//
// It panics if reading from crypto/rand.Reader errors.
func (vector Vector) MustSetRandom() {
	if err := vector.SetRandom(); err != nil {
		panic(err)
	}
}
// Equal returns true if vector and other have the same length and same elements.
func (vector Vector) Equal(other Vector) bool {
	if len(vector) != len(other) {
		return false
	}
	for i := range vector {
		if vector[i] != other[i] {
			return false
		}
	}
	return true
}
// addVecGeneric sets res[i] = a[i] + b[i] for all i. All three vectors must
// have the same length; it panics otherwise. Pure-Go fallback path.
func addVecGeneric(res, a, b Vector) {
	if len(a) != len(b) || len(a) != len(res) {
		panic("vector.Add: vectors don't have the same length")
	}
	for i := range a {
		res[i].Add(&a[i], &b[i])
	}
}
// subVecGeneric sets res[i] = a[i] - b[i] for all i. All three vectors must
// have the same length; it panics otherwise. Pure-Go fallback path.
func subVecGeneric(res, a, b Vector) {
	if len(a) != len(b) || len(a) != len(res) {
		panic("vector.Sub: vectors don't have the same length")
	}
	for i := range a {
		res[i].Sub(&a[i], &b[i])
	}
}
// scalarMulVecGeneric sets res[i] = a[i] * b for all i. res and a must have
// the same length; it panics otherwise. Pure-Go fallback path.
func scalarMulVecGeneric(res, a Vector, b *Element) {
	if len(a) != len(res) {
		panic("vector.ScalarMul: vectors don't have the same length")
	}
	for i := range a {
		res[i].Mul(&a[i], b)
	}
}
// sumVecGeneric accumulates the sum of all elements of a into res
// (res is not reset first). Pure-Go fallback path.
func sumVecGeneric(res *Element, a Vector) {
	for i := range a {
		res.Add(res, &a[i])
	}
}
// innerProductVecGeneric accumulates Σ a[i]*b[i] into res (res is not reset
// first). a and b must have the same length; it panics otherwise.
func innerProductVecGeneric(res *Element, a, b Vector) {
	if len(a) != len(b) {
		panic("vector.InnerProduct: vectors don't have the same length")
	}
	var prod Element
	for i := range a {
		prod.Mul(&a[i], &b[i])
		res.Add(res, &prod)
	}
}
// mulVecGeneric sets res[i] = a[i] * b[i] for all i. All three vectors must
// have the same length; it panics otherwise. Pure-Go fallback path.
func mulVecGeneric(res, a, b Vector) {
	if len(a) != len(b) || len(a) != len(res) {
		panic("vector.Mul: vectors don't have the same length")
	}
	for i := range a {
		res[i].Mul(&a[i], &b[i])
	}
}
// TODO @gbotrel make a public package out of that.
// execute runs work over [0, nbIterations) split into contiguous,
// non-overlapping ranges, one goroutine per range, and blocks until all
// are done. With no maxCpus argument it uses runtime.NumCPU() workers;
// an explicit maxCpus[0] is clamped to [1, 512]. A single worker runs
// work(0, nbIterations) inline with no goroutine.
// this is copy paste from internal/parallel/parallel.go
// as we don't want to generate code importing internal/
func execute(nbIterations int, work func(int, int), maxCpus ...int) {
	workers := runtime.NumCPU()
	if len(maxCpus) == 1 {
		workers = maxCpus[0]
		if workers < 1 {
			workers = 1
		} else if workers > 512 {
			workers = 512
		}
	}
	if workers == 1 {
		// serial fast path: no goroutines
		work(0, nbIterations)
		return
	}
	chunk := nbIterations / workers
	if chunk < 1 {
		// more workers than iterations: one iteration per worker
		chunk = 1
		workers = nbIterations
	}
	// the first `remainder` workers take one extra iteration so the whole
	// range is covered exactly once
	remainder := nbIterations - workers*chunk
	var wg sync.WaitGroup
	wg.Add(workers)
	start := 0
	for i := 0; i < workers; i++ {
		end := start + chunk
		if i < remainder {
			end++
		}
		go func(s, e int) {
			defer wg.Done()
			work(s, e)
		}(start, end)
		start = end
	}
	wg.Wait()
}