#ifndef SECP256K1_ECMULT_IMPL_H
#define SECP256K1_ECMULT_IMPL_H

/* WINDOW_A/WINDOW_G are lowered when building with an exhaustive test order. */
#if defined(EXHAUSTIVE_TEST_ORDER)
# if EXHAUSTIVE_TEST_ORDER > 128
    /* ... */
# elif EXHAUSTIVE_TEST_ORDER > 8
    /* ... */
# endif
#endif

/* Number of wNAF digits needed to represent 'bits' bits with window size w, rounded up. */
#define WNAF_SIZE_BITS(bits, w) (((bits) + (w) - 1) / (w))
#define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w)

/* The number of objects allocated on the scratch space for the ecmult_multi algorithms. */
#define PIPPENGER_SCRATCH_OBJECTS 6
#define STRAUSS_SCRATCH_OBJECTS 5

#define PIPPENGER_MAX_BUCKET_WINDOW 12

/* Minimum number of points for which pippenger_wnaf is faster than strauss_wnaf. */
#define ECMULT_PIPPENGER_THRESHOLD 88

#define ECMULT_MAX_POINTS_PER_BATCH 5000000

/* secp256k1_ecmult_odd_multiples_table: fill pre_a[0..n-1] with the odd multiples
 * 1*a, 3*a, ..., (2*n-1)*a. The entries store x and y only; their omitted z coordinates are
 * tracked through the ratios in zr, and the true z of the last entry is returned in *z. */
    secp256k1_gej_double_var(&d, a, NULL);

    secp256k1_ge_set_xy(&d_ge, &d.x, &d.y);
    secp256k1_ge_set_gej_zinv(&pre_a[0], a, &d.z);
    secp256k1_gej_set_ge(&ai, &pre_a[0]);

    for (i = 1; i < n; i++) {
        secp256k1_gej_add_ge_var(&ai, &ai, &d_ge, &zr[i]);
        secp256k1_ge_set_xy(&pre_a[i], &ai.x, &ai.y);
    }

    /* The additions above treated d as affine; multiplying by d.z gives the true z of the
     * last entry. */
    secp256k1_fe_mul(z, &ai.z, &d.z);
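
/* Worked example (illustration only; it assumes the usual non-test parameters WINDOW_A = 5 and
 * ECMULT_TABLE_SIZE(w) = 1 << ((w)-2), neither of which appears in this excerpt): a per-point
 * table then holds 1 << 3 = 8 entries, the odd multiples 1*a, 3*a, ..., 15*a, and a 129-bit
 * scalar needs WNAF_SIZE_BITS(129, 5) = (129 + 4)/5 = 26 wNAF digits. */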
/* Check that a wNAF digit n is odd and in range for a window of w bits. */
#define SECP256K1_ECMULT_TABLE_VERIFY(n,w) \
    VERIFY_CHECK(((n) & 1) == 1); \
    VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
    VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1));

/* secp256k1_ecmult_table_get_ge: read table entry (|n|-1)/2 and negate it when n is negative. */
        secp256k1_fe_negate(&(r->y), &(r->y), 1);

/* secp256k1_ecmult_table_get_ge_lambda: same lookup, but the x coordinates come from a
 * separate array. */
    if (n > 0) {
        secp256k1_ge_set_xy(r, &x[(n-1)/2], &pre[(n-1)/2].y);
    } else {
        secp256k1_ge_set_xy(r, &x[(-n-1)/2], &pre[(-n-1)/2].y);
        secp256k1_fe_negate(&(r->y), &(r->y), 1);
    }

/* secp256k1_ecmult_table_get_ge_storage: same lookup against a table in storage format. */
    if (n > 0) {
        secp256k1_ge_from_storage(r, &pre[(n-1)/2]);
    } else {
        secp256k1_ge_from_storage(r, &pre[(-n-1)/2]);
        secp256k1_fe_negate(&(r->y), &(r->y), 1);
    }
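
/* Illustration of the lookup convention above: for w = 5 the digits n range over the odd
 * values -15..15; digit n = 7 reads index (7-1)/2 = 3, and digit n = -7 reads the same index
 * (-(-7)-1)/2 = 3 and then negates the y coordinate, so only positive odd multiples need to
 * be stored. */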
/* secp256k1_ecmult_wnaf: convert a scalar to windowed non-adjacent form. Each digit wnaf[i]
 * is zero or odd with |wnaf[i]| <= 2^(w-1) - 1, at most one of any w consecutive digits is
 * non-zero, and the return value is the position just past the highest non-zero digit. */
static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, int w) {
    int last_set_bit = -1;

    memset(wnaf, 0, len * sizeof(wnaf[0]));

    /* Decompose the negation instead if the scalar is "high"; the digit signs are flipped
     * back via 'sign'. */
    if (secp256k1_scalar_get_bits(&s, 255, 1)) {
        secp256k1_scalar_negate(&s, &s);
        /* ... */
    }

    /* Scan upwards, emitting one signed odd digit per non-zero window. */
        if (secp256k1_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) {
            /* ... this position contributes nothing; move on ... */
        }
        /* ... */
        if (now > len - bit) {
            /* ... */
        }
        word = secp256k1_scalar_get_bits_var(&s, bit, now) + carry;
        carry = (word >> (w-1)) & 1;
        /* ... */
        wnaf[bit] = sign * word;
        /* ... */

#ifdef VERIFY
    {
        int verify_bit = bit;
        /* All bits above the last processed position must be zero. */
        while (verify_bit < 256) {
            VERIFY_CHECK(secp256k1_scalar_get_bits(&s, verify_bit, 1) == 0);
            /* ... */
        }
    }
#endif
    return last_set_bit + 1;
}
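
/* Sketch (illustration only, not part of the library): a wNAF digit array encodes an integer
 * as sum(wnaf[i] * 2^i). Recombining it with Horner's rule is a cheap way to sanity-check a
 * digit array for small test values; 'len' must stay small enough for the result to fit. */
static long long wnaf_recombine_sketch(const int *wnaf, int len) {
    long long acc = 0;
    int i;
    for (i = len - 1; i >= 0; i--) {
        acc = 2*acc + wnaf[i]; /* after step i: acc = sum of wnaf[j]*2^(j-i) for j >= i */
    }
    return acc;
}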
/* secp256k1_ecmult_strauss_wnaf: r = sum(na[i]*a[i]) + ng*G using interleaved wNAFs
 * (Strauss-Shamir). Each point scalar is split with the lambda endomorphism into two
 * roughly 128-bit halves; ng is split into its low and high 128-bit halves so the two
 * precomputed generator tables can be used. */
    int wnaf_ng_128[129];

    /* Per-point setup: skip trivial terms, split the scalar, build its wNAFs and its
     * odd-multiples table, accumulating a single global denominator Z. */
    secp256k1_fe_set_int(&Z, 1);
    for (np = 0; np < num; ++np) {
        if (secp256k1_scalar_is_zero(&na[np]) || secp256k1_gej_is_infinity(&a[np])) {
            /* ... */
        }
        /* Split na[np] as na_1 + na_lam*lambda, both roughly 128 bits. */
        secp256k1_scalar_split_lambda(&na_1, &na_lam, &na[np]);
        state->ps[no].bits_na_1   = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1,   129, &na_1,   WINDOW_A);
        state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &na_lam, WINDOW_A);
        /* Rescale a copy of this point by the running denominator Z before building its
         * odd-multiples table, so one global Z accumulates across all points. */
        secp256k1_fe_normalize_var(&Z);
        secp256k1_gej_rescale(&tmp, &Z);
        /* ... */
    }

    for (np = 0; np < no; ++np) {
        /* ... */
    }

    if (ng) {
        /* Split ng into its low and high 128-bit halves. */
        secp256k1_scalar_split_128(&ng_1, &ng_128, ng);
        bits_ng_1   = secp256k1_ecmult_wnaf(wnaf_ng_1,   129, &ng_1,   WINDOW_G);
        bits_ng_128 = secp256k1_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G);
        if (bits_ng_1 > bits) {
            bits = bits_ng_1;
        }
        if (bits_ng_128 > bits) {
            bits = bits_ng_128;
        }
    }

    secp256k1_gej_set_infinity(r);

    /* Left-to-right double-and-add over all digit streams simultaneously. */
    for (i = bits - 1; i >= 0; i--) {
        secp256k1_gej_double_var(r, r, NULL);
        for (np = 0; np < no; ++np) {
            if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) {
                /* ... look up digit n in this point's odd-multiples table ... */
                secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
            }
            if (i < state->ps[np].bits_na_lam && (n = state->ps[np].wnaf_na_lam[i])) {
                /* ... same lookup for the lambda half ... */
                secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
            }
        }
        if (i < bits_ng_1 && (n = wnaf_ng_1[i])) {
            /* ... the affine generator entries have an effective Z ratio of 1/Z, so they are
             * added with the known Z inverse ... */
            secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
        }
        if (i < bits_ng_128 && (n = wnaf_ng_128[i])) {
            /* ... */
            secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
        }
    }

    /* r was computed on the curve isomorphism with denominator Z; scale its z coordinate back. */
    secp256k1_fe_mul(&r->z, &r->z, &Z);
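
/* Why the 128-bit split of ng helps: writing ng = ng_1 + 2^128*ng_128 gives
 * ng*G = ng_1*G + ng_128*(2^128*G), so both halves stay within the 129-digit wNAFs above and
 * the high half can reuse a table of odd multiples of 2^128*G that is precomputed once (the
 * secp256k1_pre_g_128 table). */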

/* secp256k1_ecmult (single-point wrapper): r = na*a + ng*G via the Strauss code with one point. */
    secp256k1_ecmult_strauss_wnaf(&state, r, 1, a, na, ng);
static size_t secp256k1_strauss_scratch_size(size_t n_points) {
    /* point_size is the per-point scratch footprint: tables, wNAF state and copies of the
     * input point and scalar. */
    return n_points*point_size;
}
/* secp256k1_ecmult_strauss_batch: gather n_points (scalar, point) pairs through the callback
 * into scratch-allocated arrays and run the Strauss code on them. Returns 0, after rolling
 * the scratch space back to its checkpoint, if allocation or the callback fails. */
    const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n_points == 0) {
        return 1;
    }

    /* ... scratch allocations for points, scalars and the Strauss state ... */
    if (points == NULL || scalars == NULL || state.aux == NULL || state.pre_a == NULL || state.ps == NULL) {
        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
        return 0;
    }

    for (i = 0; i < n_points; i++) {
        if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) {
            secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
            return 0;
        }
        secp256k1_gej_set_ge(&points[i], &point);
    }

    secp256k1_ecmult_strauss_wnaf(&state, r, n_points, points, scalars, inp_g_sc);
    secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
    return 1;

/* secp256k1_ecmult_strauss_batch_single: run one Strauss batch over the whole input. */
    return secp256k1_ecmult_strauss_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0);

/* secp256k1_strauss_max_points: how many points' Strauss state fits in the scratch space. */
    return secp256k1_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1);
/* secp256k1_wnaf_fixed: convert a scalar to a fixed-length wNAF with window w. Every digit
 * slot is written (zero or an odd value), and the returned skew records whether 1 was added
 * to make an even scalar odd. */
static int secp256k1_wnaf_fixed(int *wnaf, const secp256k1_scalar *s, int w) {
    if (secp256k1_scalar_is_zero(s)) {
        for (pos = 0; pos < WNAF_SIZE(w); pos++) {
            /* ... */
        }
        /* ... */
    }

    if (secp256k1_scalar_is_even(s)) {
        /* ... */
    }

    wnaf[0] = secp256k1_scalar_get_bits_var(work, 0, w) + skew;

    /* Skip leading zero windows; last_w is the (possibly shorter) width of the top window. */
    for (pos = WNAF_SIZE(w) - 1; pos > 0; pos--) {
        int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
        /* ... */
    }

    while (pos <= max_pos) {
        int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
        /* Make an even digit odd by adding 1 here and subtracting 2^w from the previous digit. */
        if ((val & 1) == 0) {
            wnaf[pos - 1] -= (1 << w);
            wnaf[pos] = (val + 1);
        }
        /* Fold a +/-1 digit into the digit below it when their signs differ, zeroing the +/-1. */
        if (pos >= 2 && ((wnaf[pos - 1] == 1 && wnaf[pos - 2] < 0) || (wnaf[pos - 1] == -1 && wnaf[pos - 2] > 0))) {
            if (wnaf[pos - 1] == 1) {
                wnaf[pos - 2] += 1 << w;
            } else {
                wnaf[pos - 2] -= 1 << w;
            }
            /* ... */
        }
        /* ... */
    }
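
/* Sketch (illustration only, not part of the library): assuming the convention suggested by
 * the code above (the skew is added to the lowest digit to make an even scalar odd), the
 * fixed-length digits encode s + skew in base 2^w, so recombining them and subtracting the
 * returned skew recovers the value for small test inputs. */
static long long wnaf_fixed_recombine_sketch(const int *wnaf, int skew, int n_digits, int w) {
    long long acc = 0;
    int i;
    for (i = n_digits - 1; i >= 0; i--) {
        acc = (acc << w) + wnaf[i]; /* Horner's rule in base 2^w */
    }
    return acc - skew; /* undo the adjustment that made an even scalar odd */
}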
/* secp256k1_ecmult_pippenger_wnaf: compute r = sum(sc[i]*pt[i]) with Pippenger's bucket
 * method. Each scalar gets a fixed-length wNAF with window bucket_window+1; for every digit
 * position the points are scattered into buckets according to their digit, and the buckets
 * are then combined with running sums. */
    size_t n_wnaf = WNAF_SIZE(bucket_window+1);

    /* Build fixed-length wNAFs for all non-trivial terms. */
    for (np = 0; np < num; ++np) {
        if (secp256k1_scalar_is_zero(&sc[np]) || secp256k1_ge_is_infinity(&pt[np])) {
            /* ... */
        }
        state->ps[no].input_pos = np;
        state->ps[no].skew_na = secp256k1_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1);
        /* ... */
    }

    secp256k1_gej_set_infinity(r);

    /* Process the digit positions from most to least significant. */
    for (i = n_wnaf - 1; i >= 0; i--) {
        /* Clear every bucket (inside a loop over j). */
            secp256k1_gej_set_infinity(&buckets[j]);

        /* Scatter each point into the bucket selected by its digit at position i. */
        for (np = 0; np < no; ++np) {
            int n = state->wnaf_na[np*n_wnaf + i];
            /* ... */
            if (i == 0) {
                /* Correct for the wNAF skew by subtracting the point once. */
                int skew = point_state.skew_na;
                /* ... */
                secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]);
                secp256k1_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL);
            }
            if (n > 0) {
                /* idx = (n - 1)/2 */
                secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL);
            } else if (n < 0) {
                /* idx = -(n + 1)/2 */
                secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]);
                secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL);
            }
        }

        /* Shift the accumulator by one window (the last doubling happens below). */
        for(j = 0; j < bucket_window; j++) {
            secp256k1_gej_double_var(r, r, NULL);
        }

        /* Add buckets[0] + 3*buckets[1] + 5*buckets[2] + ... using running suffix sums
         * (inside a loop over j from the highest bucket down to 1); the final doubling
         * supplies the factor 2 and completes the window shift. */
        secp256k1_gej_set_infinity(&running_sum);
            secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL);
            secp256k1_gej_add_var(r, r, &running_sum, NULL);
        secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL);
        secp256k1_gej_double_var(r, r, NULL);
        secp256k1_gej_add_var(r, r, &running_sum, NULL);
    }
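
/* Sketch (plain-integer illustration of the running-sum trick used above, not part of the
 * library): buckets[0] + 3*buckets[1] + 5*buckets[2] + ... can be accumulated with two
 * additions per bucket plus one final doubling. */
static long odd_weighted_bucket_sum_sketch(const long *buckets, int n_buckets) {
    long running_sum = 0; /* buckets[j] + buckets[j+1] + ... for the current j */
    long result = 0;      /* accumulates 1*buckets[1] + 2*buckets[2] + ... */
    int j;
    for (j = n_buckets - 1; j > 0; j--) {
        running_sum += buckets[j];
        result += running_sum;
    }
    running_sum += buckets[0];
    return 2*result + running_sum; /* = sum over j of (2*j + 1) * buckets[j] */
}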
/* secp256k1_pippenger_bucket_window: the bucket window (number of scalar bits represented by
 * one set of buckets) expected to be fastest for n points; more points warrant a larger
 * window, capped at PIPPENGER_MAX_BUCKET_WINDOW. */
static int secp256k1_pippenger_bucket_window(size_t n) {
    /* ... */
    } else if (n <= 20) {
        /* ... */
    } else if (n <= 57) {
        /* ... */
    } else if (n <= 136) {
        /* ... */
    } else if (n <= 235) {
        /* ... */
    } else if (n <= 1260) {
        /* ... */
    } else if (n <= 4420) {
        /* ... */
    } else if (n <= 7880) {
        /* ... */
    } else if (n <= 16050) {
        /* ... */
    }
    /* ... */
}
/* secp256k1_pippenger_bucket_window_inv: the largest number of points for which the given
 * bucket_window is chosen. */
static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {
    switch(bucket_window) {
        /* ... */
        case 10: return 7880;
        case 11: return 16050;
        /* ... */
    }
}
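
/* Note: the two functions above are intended to be inverses of each other, i.e.
 * secp256k1_pippenger_bucket_window(secp256k1_pippenger_bucket_window_inv(w)) == w for every
 * valid w. For example, per the cases shown, up to 7880 points use a bucket window of 10 and
 * up to 16050 use 11, with PIPPENGER_MAX_BUCKET_WINDOW (12) beyond that. */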
/* secp256k1_ecmult_endo_split: replace the term s1*p1 by the two half-length terms
 * s1*p1 + s2*p2, where p2 = lambda*p1 and s1 + lambda*s2 equals the original scalar (mod n).
 * 'High' halves are negated together with their point so that both scalars stay small. */
    secp256k1_scalar_split_lambda(s1, s2, &tmp);
    secp256k1_ge_mul_lambda(p2, p1);

    if (secp256k1_scalar_is_high(s1)) {
        secp256k1_scalar_negate(s1, s1);
        secp256k1_ge_neg(p1, p1);
    }
    if (secp256k1_scalar_is_high(s2)) {
        secp256k1_scalar_negate(s2, s2);
        secp256k1_ge_neg(p2, p2);
    }
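
/* The split above uses the curve's efficiently computable endomorphism: for the right
 * constant lambda, lambda*(x, y) = (beta*x, y), and secp256k1_scalar_split_lambda writes the
 * input scalar as s1 + lambda*s2 (mod n) with s1 and s2 roughly 128 bits each, halving the
 * number of wNAF digit positions per term. */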
static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window) {
    /* Each input point contributes two entries after the endomorphism split, plus two entries
     * for the optional G term. */
    size_t entries = 2*n_points + 2;
    /* ... */
}
/* secp256k1_ecmult_pippenger_batch: gather up to n_points (scalar, point) pairs via the
 * callback, split every term with the endomorphism, and run Pippenger's algorithm on the
 * scratch space. */
    const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
    /* Two entries per input point after the endomorphism split, plus two for the G term. */
    size_t entries = 2*n_points + 2;
    size_t point_idx = 0;

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n_points == 0) {
        /* ... */
    }

    bucket_window = secp256k1_pippenger_bucket_window(n_points);

    /* Allocate everything from the scratch space; roll back to the checkpoint on failure. */
    points = (secp256k1_ge *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*points));
    scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars));
    state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(error_callback, scratch, sizeof(*state_space));
    if (points == NULL || scalars == NULL || state_space == NULL) {
        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
        return 0;
    }
    /* ... */
    state_space->wnaf_na = (int *) secp256k1_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
    buckets = (secp256k1_gej *) secp256k1_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets));
    if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) {
        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
        return 0;
    }

    /* Entries 0 and 1 hold the split of the optional G term. */
    if (inp_g_sc != NULL) {
        scalars[0] = *inp_g_sc;
        points[0] = secp256k1_ge_const_g;
        /* ... */
        secp256k1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);
    }

    /* Fetch the remaining terms through the callback and split each one in place. */
    while (point_idx < n_points) {
        if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) {
            secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
            return 0;
        }
        /* ... */
        secp256k1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);
        /* ... */
    }

    secp256k1_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx);

    /* Wipe the scratch contents before releasing them. */
    for(i = 0; (size_t)i < idx; i++) {
        secp256k1_scalar_clear(&scalars[i]);
        /* ... */
        for(j = 0; j < WNAF_SIZE(bucket_window+1); j++) {
            /* ... */
        }
    }
    for(i = 0; i < 1<<bucket_window; i++) {
        secp256k1_gej_clear(&buckets[i]);
    }
    secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
    return 1;

    return secp256k1_ecmult_pippenger_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0);
/* secp256k1_pippenger_max_points: the largest number of points (besides the G term) that fits
 * in the scratch space. Body of the loop over candidate bucket_window values: */
        size_t max_points = secp256k1_pippenger_bucket_window_inv(bucket_window);
        size_t space_for_points;
        size_t space_overhead;
        /* ... */

        /* Each point becomes two entries after the endomorphism split. */
        entry_size = 2*entry_size;
        /* ... */
        if (space_overhead > max_alloc) {
            /* ... */
        }
        space_for_points = max_alloc - space_overhead;

        n_points = space_for_points/entry_size;
        n_points = n_points > max_points ? max_points : n_points;
        if (n_points > res) {
            /* ... */
        }
        if (n_points < max_points) {
            /* Stop here so that every count up to the returned value is guaranteed to work. */
            /* ... */
        }
/* secp256k1_ecmult_multi_simple_var: fallback used when no scratch space is available;
 * computes r = inp_g_sc*G + sum(sc[i]*pt[i]) one term at a time with the single-point
 * multiplier. */
    secp256k1_scalar_set_int(&szero, 0);
    secp256k1_gej_set_infinity(r);
    secp256k1_gej_set_infinity(&tmpj);
    /* r = inp_g_sc*G */
    secp256k1_ecmult(r, &tmpj, &szero, inp_g_sc);
    for (point_idx = 0; point_idx < n_points; point_idx++) {
        if (!cb(&scalar, &point, point_idx, cbdata)) {
            return 0;
        }
        /* r += scalar*point */
        secp256k1_gej_set_ge(&pointj, &point);
        secp256k1_ecmult(&tmpj, &pointj, &scalar, NULL);
        secp256k1_gej_add_var(r, r, &tmpj, NULL);
    }
/* secp256k1_ecmult_multi_batch_size_helper: split n points into batches of roughly equal
 * size, none larger than max_n_batch_points. Returns 0 if no batch size is possible. */
static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) {
    if (max_n_batch_points == 0) {
        return 0;
    }
    /* ... */
    /* n_batches = ceil(n / max_n_batch_points), n_batch_points = ceil(n / n_batches). */
    *n_batches = 1 + (n - 1) / max_n_batch_points;
    *n_batch_points = 1 + (n - 1) / *n_batches;
    return 1;
}
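
/* Worked example: n = 100 points with max_n_batch_points = 40 gives
 * n_batches = 1 + 99/40 = 3 and n_batch_points = 1 + 99/3 = 34, i.e. three batches of at most
 * 34 points each (3*34 = 102 >= 100), keeping the batch sizes nearly equal instead of
 * producing 40 + 40 + 20. */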
/* secp256k1_ecmult_multi_var: top-level dispatcher. Picks Pippenger for large batches,
 * Strauss for smaller ones, and the simple per-point fallback when no scratch space is
 * available, then processes the input in batches. */
    size_t n_batch_points;

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n == 0) {
        /* ... */
    }
    /* Only a G term: a single ecmult call suffices. */
    secp256k1_scalar_set_int(&szero, 0);
    secp256k1_ecmult(r, r, &szero, inp_g_sc);
    /* ... */

    if (scratch == NULL) {
        return secp256k1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
    }

    /* Size the batches for Pippenger first; if that fails, fall back to the simple code.
     * Batches below ECMULT_PIPPENGER_THRESHOLD are re-sized for Strauss instead. */
    if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(error_callback, scratch), n)) {
        return secp256k1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
    }
    if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) {
        f = secp256k1_ecmult_pippenger_batch;
    } else {
        if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(error_callback, scratch), n)) {
            return secp256k1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
        }
        f = secp256k1_ecmult_strauss_batch;
    }

    /* Run the batches; the G scalar is only passed to the first one. */
    for(i = 0; i < n_batches; i++) {
        size_t nbp = n < n_batch_points ? n : n_batch_points;
        size_t offset = n_batch_points*i;
        if (!f(error_callback, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
            /* ... */
        }
        secp256k1_gej_add_var(r, r, &tmp, NULL);
        /* ... */
    }
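
/* Sketch (illustration only; the struct and function names below are hypothetical, not part
 * of the library): a typical secp256k1_ecmult_multi_callback simply hands out the idx-th
 * (scalar, point) pair from arrays supplied through the opaque data pointer. */
typedef struct {
    const secp256k1_scalar *scalars;
    const secp256k1_ge *points;
} multi_cb_data_sketch;

static int multi_cb_sketch(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
    const multi_cb_data_sketch *d = (const multi_cb_data_sketch *)data;
    *sc = d->scalars[idx];
    *pt = d->points[idx];
    return 1; /* returning 0 makes the batch functions roll back their scratch use and fail */
}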