#ifndef SECP256K1_MODINV64_IMPL_H
#define SECP256K1_MODINV64_IMPL_H

#include "int128.h"
#include "modinv64.h"
#include "util.h"

    /* Compute a*factor limb by limb, accumulating carries in a 128-bit variable. */
    secp256k1_i128_from_i64(&c, 0);
    for (i = 0; i < 4; ++i) {
        if (i < alen) secp256k1_i128_accum_mul(&c, a->v[i], factor);
        r->v[i] = secp256k1_i128_to_u64(&c) & M62; secp256k1_i128_rshift(&c, 62);
    }
    if (4 < alen) secp256k1_i128_accum_mul(&c, a->v[4], factor);
    /* Verify that the remaining value fits in a single signed 64-bit top limb. */
    secp256k1_i128_from_i64(&d, secp256k1_i128_to_i64(&c));
    VERIFY_CHECK(secp256k1_i128_eq_var(&c, &d));
    r->v[4] = secp256k1_i128_to_i64(&c);
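
/* As an aside, the following standalone sketch (not part of the library; the
 * name and the direct use of the compiler's __int128 are illustrative
 * assumptions) shows the same multiply-and-carry pattern without the
 * secp256k1_i128_* wrappers. Like the library itself, it assumes arithmetic
 * (sign-extending) right shifts and wrapping narrowing conversions. */
static void sketch_mul_62(int64_t r[5], const int64_t a[5], int64_t factor) {
    const uint64_t M62 = UINT64_MAX >> 2; /* mask of the low 62 bits */
    __int128 c = 0;
    int i;
    for (i = 0; i < 4; ++i) {
        c += (__int128)a[i] * factor;        /* accumulate a[i]*factor onto the carry */
        r[i] = (int64_t)((uint64_t)c & M62); /* emit the low 62 bits as one limb */
        c >>= 62;                            /* keep the signed carry */
    }
    c += (__int128)a[4] * factor;
    r[4] = (int64_t)c;                       /* the top limb absorbs what remains */
}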
    secp256k1_modinv64_mul_62(&am, a, alen, 1);   /* am = a (normalizes all but the top limb) */
    secp256k1_modinv64_mul_62(&bm, b, 5, factor); /* bm = b*factor */
    for (i = 0; i < 4; ++i) {
        /* Verify that all but the top limb of am and bm are normalized. */
        VERIFY_CHECK(am.v[i] >> 62 == 0);
        VERIFY_CHECK(bm.v[i] >> 62 == 0);
    }
    /* Compare limbs from most significant to least significant. */
    for (i = 4; i >= 0; --i) {
        if (am.v[i] < bm.v[i]) return -1;
        if (am.v[i] > bm.v[i]) return 1;
    }
    return 0;
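
/* Elsewhere in this file this comparison backs VERIFY range assertions, e.g.
 * secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0 expresses
 * d > -2*modulus by comparing a value against a small multiple of the modulus. */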
    /* Compute the determinant of t = [[u, v], [q, r]] and check that it is a power of two. */
    secp256k1_i128_det(&a, t->u, t->v, t->q, t->r);
    return secp256k1_i128_check_pow2(&a, n);
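
/* Illustrative sketch (not library code; assumes compiler __int128 support and
 * n < 127): the two calls above amount to the following check, since the
 * entries of a divsteps transition matrix are small enough for u*r - v*q to
 * fit in 128 bits. */
static int sketch_det_is_pow2(int64_t u, int64_t v, int64_t q, int64_t r, unsigned int n) {
    __int128 det = (__int128)u * r - (__int128)v * q;
    return det == ((__int128)1 << n);
}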
    int64_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4];

    /* Verify that all limbs are in range (-2^62,2^62). */
    for (i = 0; i < 5; ++i) {
        VERIFY_CHECK(r->v[i] >= -M62);
        VERIFY_CHECK(r->v[i] <= M62);
    }

    /* In a first step, add the modulus if the input is negative, and then negate if
     * requested. This brings r from range (-2*modulus,modulus) to range (-modulus,modulus).
     * As all limbs are in range (-2^62,2^62), this cannot overflow an int64_t. Note that
     * the right shifts below are sign-extending. */
    cond_add = r4 >> 63;
    r0 += modinfo->modulus.v[0] & cond_add;
    r1 += modinfo->modulus.v[1] & cond_add;
    r2 += modinfo->modulus.v[2] & cond_add;
    r3 += modinfo->modulus.v[3] & cond_add;
    r4 += modinfo->modulus.v[4] & cond_add;
    cond_negate = sign >> 63;
    r0 = (r0 ^ cond_negate) - cond_negate;
    r1 = (r1 ^ cond_negate) - cond_negate;
    r2 = (r2 ^ cond_negate) - cond_negate;
    r3 = (r3 ^ cond_negate) - cond_negate;
    r4 = (r4 ^ cond_negate) - cond_negate;
    /* Propagate the top bits, to bring the limbs back to range (-2^62,2^62). */
    r1 += r0 >> 62; r0 &= M62;
    r2 += r1 >> 62; r1 &= M62;
    r3 += r2 >> 62; r2 &= M62;
    r4 += r3 >> 62; r3 &= M62;

    /* In a second step add the modulus again if the result is still negative, bringing
     * r to range [0,modulus). */
    cond_add = r4 >> 63;
    r0 += modinfo->modulus.v[0] & cond_add;
    r1 += modinfo->modulus.v[1] & cond_add;
    r2 += modinfo->modulus.v[2] & cond_add;
    r3 += modinfo->modulus.v[3] & cond_add;
    r4 += modinfo->modulus.v[4] & cond_add;
    /* And propagate again. */
    r1 += r0 >> 62; r0 &= M62;
    r2 += r1 >> 62; r1 &= M62;
    r3 += r2 >> 62; r2 &= M62;
    r4 += r3 >> 62; r3 &= M62;
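
/* The branch-free negation above relies on two's complement identities. A
 * minimal sketch (illustrative name), where negate is 0 or 1: */
static int64_t sketch_cond_negate(int64_t x, int negate) {
    int64_t mask = -(int64_t)negate; /* 0 if negate == 0, -1 (all ones) otherwise */
    /* (x ^ 0) - 0 == x; (x ^ -1) - (-1) == ~x + 1 == -x in two's complement. */
    return (x ^ mask) - mask;
}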
    /* Transformation matrix, starting as 8 times the identity: u and r carry three
     * extra factor-of-2 bits because the loop below performs 59 divsteps (i = 3..61),
     * for a total scaling of 2^62. */
    uint64_t u = 8, v = 0, q = 0, r = 8;
    uint64_t c1, c2, f = f0, g = g0, x, y, z;

    for (i = 3; i < 62; ++i) {
        /* With c1 a condition mask (0 or all ones) computed earlier in the iteration:
         * map zeta to zeta-1 (c1 == 0) or to -zeta-2 (c1 == -1). */
        zeta = (zeta ^ c1) - 1;
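
/* For reference, a direct branching transcription of a single divstep from the
 * Bernstein-Yang paper, acting on (delta, f, g) with f odd (illustrative only;
 * the routine above batches 59 of these, branch-free, using a zeta-based
 * reparametrization of delta). Assumes arithmetic right shifts. */
static void sketch_divstep(int64_t *delta, int64_t *f, int64_t *g) {
    if (*delta > 0 && (*g & 1)) {
        /* "Swap" case: (delta, f, g) -> (1 - delta, g, (g - f)/2). */
        int64_t tmp = *f;
        *delta = 1 - *delta;
        *f = *g;
        *g = (*g - tmp) >> 1;
    } else if (*g & 1) {
        /* g odd, delta <= 0: (delta, f, g) -> (1 + delta, f, (g + f)/2). */
        *delta = 1 + *delta;
        *g = (*g + *f) >> 1;
    } else {
        /* g even: (delta, f, g) -> (1 + delta, f, g/2). */
        *delta = 1 + *delta;
        *g >>= 1;
    }
}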
    /* Transformation matrix, starting as the identity. */
    uint64_t u = 1, v = 0, q = 0, r = 1;
    int i = 62, limit, zeros;

        /* Count trailing zeros of g, using a sentinel bit so that at most i are counted. */
        zeros = secp256k1_ctz64_var(g | (UINT64_MAX << i));

        /* In the eta < 0 branch: eta is negated, and f,g are replaced with g,-f
         * (with u,v,q,r updated accordingly). */
            tmp = f; f = g; g = -tmp;
            tmp = u; u = q; q = -tmp;
            tmp = v; v = r; r = -tmp;
            /* Perform up to min(eta + 1, i) divsteps at once. */
            limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
            /* For odd f, f*f*(f*f-2) == -1 mod 64, so this w satisfies f*w == -g mod 2^6:
             * adding w*f to g clears up to 6 of its bottom bits at once. */
            w = (f * g * (f * f - 2)) & m;

        /* In the eta >= 0 branch: */
            limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
            /* Here w is first set to 1/f mod 16 (valid for odd f); a following step
             * (not shown) multiplies it by -g to obtain the multiple of f that
             * cancels g's bottom bits. */
            w = f + (((f + 1) & 4) << 1);
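
/* Both identities are easy to verify exhaustively over one period. A minimal
 * standalone check (illustrative name, not library code): */
static int sketch_check_inverse_tricks(void) {
    uint64_t f;
    for (f = 1; f < 64; f += 2) {
        uint64_t w = f + (((f + 1) & 4) << 1);
        if (((f * f * (f * f - 2)) & 63) != 63) return 0; /* f*f*(f*f-2) == -1 mod 64 */
        if (((f * w) & 15) != 1) return 0;                /* f*w == 1 mod 16 */
    }
    return 1;
}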
    const int64_t d0 = d->v[0], d1 = d->v[1], d2 = d->v[2], d3 = d->v[3], d4 = d->v[4];
    const int64_t e0 = e->v[0], e1 = e->v[1], e2 = e->v[2], e3 = e->v[3], e4 = e->v[4];
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;

    VERIFY_CHECK((secp256k1_modinv64_abs(u) + secp256k1_modinv64_abs(v)) >= 0); /* |u|+|v| doesn't overflow */
    VERIFY_CHECK((secp256k1_modinv64_abs(q) + secp256k1_modinv64_abs(r)) >= 0); /* |q|+|r| doesn't overflow */

    /* sd,se are sign masks (0 or -1) for d and e, taken from their top limbs. */
    sd = d4 >> 63;
    se = e4 >> 63;
    /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
    md = (u & sd) + (v & se);
    me = (q & sd) + (r & se);
    /* Begin computing t*[d,e]. */
    secp256k1_i128_mul(&cd, u, d0);
    secp256k1_i128_accum_mul(&cd, v, e0);
    secp256k1_i128_mul(&ce, q, d0);
    secp256k1_i128_accum_mul(&ce, r, e0);
    /* Correct md,me so that t*[d,e]+modulus*[md,me] has 62 zero bottom bits. */
    md -= (modinfo->modulus_inv62 * secp256k1_i128_to_u64(&cd) + md) & M62;
    me -= (modinfo->modulus_inv62 * secp256k1_i128_to_u64(&ce) + me) & M62;
    /* Update the beginning of the computation for t*[d,e]+modulus*[md,me] now that md,me are known. */
    secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[0], md);
    secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[0], me);
    /* Verify that the low 62 bits of the computation are indeed zero, and then throw them away. */
    VERIFY_CHECK((secp256k1_i128_to_u64(&cd) & M62) == 0); secp256k1_i128_rshift(&cd, 62);
    VERIFY_CHECK((secp256k1_i128_to_u64(&ce) & M62) == 0); secp256k1_i128_rshift(&ce, 62);
    /* Compute limb 1 of t*[d,e]+modulus*[md,me], and store it as output limb 0 (= down shift). */
    secp256k1_i128_accum_mul(&cd, u, d1);
    secp256k1_i128_accum_mul(&cd, v, e1);
    secp256k1_i128_accum_mul(&ce, q, d1);
    secp256k1_i128_accum_mul(&ce, r, e1);
    secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[1], md);
    secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[1], me);
    d->v[0] = secp256k1_i128_to_u64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
    e->v[0] = secp256k1_i128_to_u64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
    /* Compute limb 2 of t*[d,e]+modulus*[md,me], and store it as output limb 1. */
    secp256k1_i128_accum_mul(&cd, u, d2);
    secp256k1_i128_accum_mul(&cd, v, e2);
    secp256k1_i128_accum_mul(&ce, q, d2);
    secp256k1_i128_accum_mul(&ce, r, e2);
    secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[2], md);
    secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[2], me);
    d->v[1] = secp256k1_i128_to_u64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
    e->v[1] = secp256k1_i128_to_u64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
    /* Compute limb 3 of t*[d,e]+modulus*[md,me], and store it as output limb 2. */
    secp256k1_i128_accum_mul(&cd, u, d3);
    secp256k1_i128_accum_mul(&cd, v, e3);
    secp256k1_i128_accum_mul(&ce, q, d3);
    secp256k1_i128_accum_mul(&ce, r, e3);
    secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[3], md);
    secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[3], me);
    d->v[2] = secp256k1_i128_to_u64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
    e->v[2] = secp256k1_i128_to_u64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
    /* Compute limb 4 of t*[d,e]+modulus*[md,me], and store it as output limb 3. */
    secp256k1_i128_accum_mul(&cd, u, d4);
    secp256k1_i128_accum_mul(&cd, v, e4);
    secp256k1_i128_accum_mul(&ce, q, d4);
    secp256k1_i128_accum_mul(&ce, r, e4);
    secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[4], md);
    secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[4], me);
    d->v[3] = secp256k1_i128_to_u64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
    e->v[3] = secp256k1_i128_to_u64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
    /* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */
    d->v[4] = secp256k1_i128_to_i64(&cd);
    e->v[4] = secp256k1_i128_to_i64(&ce);
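
/* The md,me correction above is the key trick: it picks the multiple of the
 * modulus whose addition clears the bottom 62 bits of the accumulator, so the
 * subsequent right shift is exact. A standalone sketch of that step
 * (illustrative names; assumes compiler __int128 support; minv must equal
 * modulus^{-1} mod 2^62): */
static int64_t sketch_cancel_low62(__int128 c, int64_t modulus, uint64_t minv) {
    const uint64_t M62 = UINT64_MAX >> 2;
    /* k == -c/modulus mod 2^62, so c + modulus*k == 0 mod 2^62. */
    int64_t k = -(int64_t)((minv * (uint64_t)c) & M62);
    VERIFY_CHECK((((__int128)modulus * k + c) & M62) == 0);
    return k;
}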
    const int64_t f0 = f->v[0], f1 = f->v[1], f2 = f->v[2], f3 = f->v[3], f4 = f->v[4];
    const int64_t g0 = g->v[0], g1 = g->v[1], g2 = g->v[2], g3 = g->v[3], g4 = g->v[4];
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;

    /* Start computing t*[f,g]. */
    secp256k1_i128_mul(&cf, u, f0);
    secp256k1_i128_accum_mul(&cf, v, g0);
    secp256k1_i128_mul(&cg, q, f0);
    secp256k1_i128_accum_mul(&cg, r, g0);
    /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */
    VERIFY_CHECK((secp256k1_i128_to_u64(&cf) & M62) == 0); secp256k1_i128_rshift(&cf, 62);
    VERIFY_CHECK((secp256k1_i128_to_u64(&cg) & M62) == 0); secp256k1_i128_rshift(&cg, 62);
    /* Compute limb 1 of t*[f,g], and store it as output limb 0 (= down shift). */
    secp256k1_i128_accum_mul(&cf, u, f1);
    secp256k1_i128_accum_mul(&cf, v, g1);
    secp256k1_i128_accum_mul(&cg, q, f1);
    secp256k1_i128_accum_mul(&cg, r, g1);
    f->v[0] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
    g->v[0] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
    /* Compute limb 2 of t*[f,g], and store it as output limb 1. */
    secp256k1_i128_accum_mul(&cf, u, f2);
    secp256k1_i128_accum_mul(&cf, v, g2);
    secp256k1_i128_accum_mul(&cg, q, f2);
    secp256k1_i128_accum_mul(&cg, r, g2);
    f->v[1] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
    g->v[1] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
    /* Compute limb 3 of t*[f,g], and store it as output limb 2. */
    secp256k1_i128_accum_mul(&cf, u, f3);
    secp256k1_i128_accum_mul(&cf, v, g3);
    secp256k1_i128_accum_mul(&cg, q, f3);
    secp256k1_i128_accum_mul(&cg, r, g3);
    f->v[2] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
    g->v[2] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
    /* Compute limb 4 of t*[f,g], and store it as output limb 3. */
    secp256k1_i128_accum_mul(&cf, u, f4);
    secp256k1_i128_accum_mul(&cf, v, g4);
    secp256k1_i128_accum_mul(&cg, q, f4);
    secp256k1_i128_accum_mul(&cg, r, g4);
    f->v[3] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
    g->v[3] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
    /* What remains is limb 5 of t*[f,g]; store it as output limb 4. */
    f->v[4] = secp256k1_i128_to_i64(&cf);
    g->v[4] = secp256k1_i128_to_i64(&cg);
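
/* Note on the two VERIFY_CHECKs at the top of this routine: the transition
 * matrices produced by both divsteps routines carry a total scaling of 2^62
 * (u*f0 + v*g0 and q*f0 + r*g0 are the new f and g times 2^62), so the bottom
 * 62 bits of t*[f,g] are necessarily zero before the shift. */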
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;

    /* Start computing t*[f,g]. */
    fi = f->v[0];
    gi = g->v[0];
    secp256k1_i128_mul(&cf, u, fi);
    secp256k1_i128_accum_mul(&cf, v, gi);
    secp256k1_i128_mul(&cg, q, fi);
    secp256k1_i128_accum_mul(&cg, r, gi);
    /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */
    VERIFY_CHECK((secp256k1_i128_to_u64(&cf) & M62) == 0); secp256k1_i128_rshift(&cf, 62);
    VERIFY_CHECK((secp256k1_i128_to_u64(&cg) & M62) == 0); secp256k1_i128_rshift(&cg, 62);
    /* Iteratively compute limb i of t*[f,g], and store it as output limb i-1 (= down shift). */
    for (i = 1; i < len; ++i) {
        fi = f->v[i];
        gi = g->v[i];
        secp256k1_i128_accum_mul(&cf, u, fi);
        secp256k1_i128_accum_mul(&cf, v, gi);
        secp256k1_i128_accum_mul(&cg, q, fi);
        secp256k1_i128_accum_mul(&cg, r, gi);
        f->v[i - 1] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
        g->v[i - 1] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
    }
    /* What remains is limb (len) of t*[f,g]; store it as output limb (len-1). */
    f->v[len - 1] = secp256k1_i128_to_i64(&cf);
    g->v[len - 1] = secp256k1_i128_to_i64(&cg);
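
/* Unlike the fixed-size routine above, this variable-time variant only touches
 * the first len limbs: as f and g shrink over the course of the algorithm, the
 * caller lowers len and each update gets cheaper. */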
    /* Do 10 iterations of 59 divsteps each = 590 divsteps, which suffices for 256-bit inputs. */
    for (i = 0; i < 10; ++i) {
        secp256k1_modinv64_trans2x2 t;
        /* Compute the transition matrix and new zeta after 59 divsteps. */
        zeta = secp256k1_modinv64_divsteps_59(zeta, f.v[0], g.v[0], &t);
        /* Update d,e using that transition matrix. */
        secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
        /* Update f,g using that transition matrix. */
        secp256k1_modinv64_update_fg_62(&f, &g, &t);
    }

    /* g == 0 */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0);
    /* |f| == 1, or (x == 0 and d == 0 and |f| == modulus) */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
                 secp256k1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
                 (secp256k1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  (secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 ||
                   secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0)));

    /* Optionally negate d, normalize it to [0,modulus), and place it in the output. */
    secp256k1_modinv64_normalize_62(&d, f.v[4], modinfo);
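
/* After the 590 divsteps, g has reached 0 and f holds +/- gcd of the initial
 * f,g, i.e. +/- 1 for valid input; d then holds +/- the inverse with matching
 * sign. Passing f.v[4] (which carries f's sign) as the sign argument makes
 * normalize_62 negate d exactly when f == -1, so the result is x^{-1} rather
 * than -x^{-1}. */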
        /* Compute the transition matrix and new eta after 62 divsteps. */
        eta = secp256k1_modinv64_divsteps_62_var(eta, f.v[0], g.v[0], &t);
        /* Update d,e using that transition matrix. */
        secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
        /* Update f,g using that transition matrix. */
        secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t);
        /* If the bottom limb of g is zero, there is a chance that g == 0 entirely. */
        if (g.v[0] == 0) {
            cond = 0;
            /* Check whether the other limbs are zero as well. */
            for (j = 1; j < len; ++j) {
                cond |= g.v[j];
            }
            /* If so, we are done. */
            if (cond == 0) break;
        }
        /* Determine whether len > 1 and limb (len-1) of both f and g is 0 or -1,
         * in which case the representation can be shrunk by one limb. cond == 0
         * exactly when all three conditions hold. */
        fn = f.v[len - 1];
        gn = g.v[len - 1];
        cond = ((int64_t)len - 2) >> 63;
        cond |= fn ^ (fn >> 63);
        cond |= gn ^ (gn >> 63);

    /* g == 0 */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0);
    /* |f| == 1, or (x == 0 and d == 0 and |f| == modulus) */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
                 secp256k1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
                 (secp256k1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  (secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 ||
                   secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0)));

    /* Optionally negate d, normalize it to [0,modulus), and place it in the output. */
    secp256k1_modinv64_normalize_62(&d, f.v[len - 1], modinfo);
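
/* End-to-end usage sketch (illustrative, not library code; assumes the
 * secp256k1_modinv64_modinfo layout {modulus, modulus_inv62} from modinv64.h):
 * invert 3 modulo 5. modulus_inv62 must equal modulus^{-1} mod 2^62, which for
 * 5 is (2^62 + 1)/5 = 922337203685477581. */
static void sketch_invert_3_mod_5(void) {
    static const secp256k1_modinv64_modinfo modinfo_5 = {
        {{5, 0, 0, 0, 0}},    /* modulus = 5, in signed62 limbs */
        922337203685477581ULL /* 5^{-1} mod 2^62 */
    };
    secp256k1_modinv64_signed62 x = {{3, 0, 0, 0, 0}};
    secp256k1_modinv64_var(&x, &modinfo_5);
    VERIFY_CHECK(x.v[0] == 2); /* 3*2 == 6 == 1 (mod 5) */
}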