#ifndef SECP256K1_SCALAR_REPR_IMPL_H
#define SECP256K1_SCALAR_REPR_IMPL_H

#include "checkmem.h"
#include "int128.h"
#include "modinv64_impl.h"
#include "util.h"

/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)

/* Limbs of 2^256 minus the secp256k1 order. */
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
#define SECP256K1_N_C_1 (~SECP256K1_N_1)
#define SECP256K1_N_C_2 (1)

/* Limbs of half the secp256k1 order. */
#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)

SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
    return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
}

SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    VERIFY_CHECK(count < 32);
    VERIFY_CHECK(offset + count <= 256);
    if ((offset + count - 1) >> 6 == offset >> 6) {
        return secp256k1_scalar_get_bits(a, offset, count);
    } else {
        VERIFY_CHECK((offset >> 6) + 1 < 4);
        return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1);
    }
}
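/* Informative example (assumes a local secp256k1_scalar s): the limb layout is
 * little-endian, i.e. bit k of the scalar lives at bit (k & 0x3F) of d[k >> 6].
 * A window that stays inside one limb can use secp256k1_scalar_get_bits, while
 * a window crossing a limb boundary needs the _var variant:
 *
 *     unsigned int w = secp256k1_scalar_get_bits_var(&s, 62, 4);
 *
 * combines the top 2 bits of s.d[0] with the low 2 bits of s.d[1] (count must
 * stay below 32).
 */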
SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) {
    secp256k1_uint128 t;
    VERIFY_CHECK(overflow <= 1);

    /* If overflow is set, add 2^256 - n (i.e. SECP256K1_N_C), which wraps r back below n. */
    secp256k1_u128_from_u64(&t, r->d[0]);
    secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_0);
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[1]);
    secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_1);
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[2]);
    secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_2);
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[3]);
    r->d[3] = secp256k1_u128_to_u64(&t);
    return overflow;
}
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    int overflow;
    secp256k1_uint128 t;

    secp256k1_u128_from_u64(&t, a->d[0]);
    secp256k1_u128_accum_u64(&t, b->d[0]);
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, a->d[1]);
    secp256k1_u128_accum_u64(&t, b->d[1]);
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, a->d[2]);
    secp256k1_u128_accum_u64(&t, b->d[2]);
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, a->d[3]);
    secp256k1_u128_accum_u64(&t, b->d[3]);
    r->d[3] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    overflow = secp256k1_u128_to_u64(&t) + secp256k1_scalar_check_overflow(r);
    VERIFY_CHECK(overflow == 0 || overflow == 1);
    secp256k1_scalar_reduce(r, overflow);
    return overflow;
}
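/* Informative example: the sum is always returned fully reduced modulo the
 * group order n, and the return value is the carry/overflow bit. Assuming a
 * scalar n_minus_one holding n-1:
 *
 *     secp256k1_scalar two, r;
 *     secp256k1_scalar_set_int(&two, 2);
 *     overflow = secp256k1_scalar_add(&r, &n_minus_one, &two);
 *
 * leaves r == 1 and overflow == 1, since (n-1) + 2 wraps past n exactly once.
 */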
/* Conditionally add a power of two to a scalar. The result is not allowed to overflow. */
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
    secp256k1_uint128 t;
    VERIFY_CHECK(bit < 256);

    bit += ((uint32_t) flag - 1) & 0x100;    /* forcing (bit >> 6) > 3 makes this a noop */
    secp256k1_u128_from_u64(&t, r->d[0]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[1]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[2]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F));
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[3]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
    r->d[3] = secp256k1_u128_to_u64(&t);
    VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 0);
}
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
    int over;
    r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56;
    r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56;
    r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56;
    r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56;
    over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
    if (overflow) {
        *overflow = over;
    }
}
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
    bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3];
    bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2];
    bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1];
    bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
}
SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
    return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
}

static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
    uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
    secp256k1_uint128 t;
    /* Compute n - a as (~a + n + 1) mod 2^256, then force the result to zero if a was zero. */
    secp256k1_u128_from_u64(&t, ~a->d[0]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_0 + 1);
    r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, ~a->d[1]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_1);
    r->d[1] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, ~a->d[2]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_2);
    r->d[2] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, ~a->d[3]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_3);
    r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
}

SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
    return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
}
static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
    /* If flag = 0, mask = 00...00 and this is a no-op;
     * if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate. */
    uint64_t mask = !flag - 1;
    uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
    secp256k1_uint128 t;
    secp256k1_u128_from_u64(&t, r->d[0] ^ mask);
    secp256k1_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask);
    r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[1] ^ mask);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_1 & mask);
    r->d[1] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[2] ^ mask);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_2 & mask);
    r->d[2] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[3] ^ mask);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_3 & mask);
    r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
    return 2 * (mask == 0) - 1;
}
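/* Informative note: the negation above is branch-free. When flag is nonzero,
 * mask is all ones, so each limb is complemented and the order n is added,
 * exactly as in secp256k1_scalar_negate; when flag is zero the limbs pass
 * through unchanged. The return value is -1 if the scalar was negated and 1
 * otherwise.
 */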
/* The following macros operate on a multi-limb accumulator held in the local variables c0, c1, c2. */

/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd(a,b) { \
    uint64_t tl, th; \
    { \
        secp256k1_uint128 t; \
        secp256k1_u128_mul(&t, a, b); \
        th = secp256k1_u128_hi_u64(&t); \
        tl = secp256k1_u128_to_u64(&t); \
    } \
    c0 += tl;                 /* overflow is handled on the next line */ \
    th += (c0 < tl);          /* at most 0xFFFFFFFFFFFFFFFF */ \
    c1 += th;                 /* overflow is handled on the next line */ \
    c2 += (c1 < th);          /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}

/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
#define muladd_fast(a,b) { \
    uint64_t tl, th; \
    { \
        secp256k1_uint128 t; \
        secp256k1_u128_mul(&t, a, b); \
        th = secp256k1_u128_hi_u64(&t); \
        tl = secp256k1_u128_to_u64(&t); \
    } \
    c0 += tl;                 /* overflow is handled on the next line */ \
    th += (c0 < tl);          /* at most 0xFFFFFFFFFFFFFFFF */ \
    c1 += th;                 /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK(c1 >= th); \
}

/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
#define sumadd(a) { \
    unsigned int over; \
    c0 += (a);                  /* overflow is handled on the next line */ \
    over = (c0 < (a)); \
    c1 += over;                 /* overflow is handled on the next line */ \
    c2 += (c1 < over);          /* never overflows by contract */ \
}

/** Add a to the number defined by (c0,c1). c1 must never overflow, and c2 must be zero. */
#define sumadd_fast(a) { \
    c0 += (a);                 /* overflow is handled on the next line */ \
    c1 += (c0 < (a));          /* never overflows by contract (verified the next line) */ \
    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
    VERIFY_CHECK(c2 == 0); \
}

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */
#define extract(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = c2; \
    c2 = 0; \
}

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. c2 is required to be zero. */
#define extract_fast(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = 0; \
    VERIFY_CHECK(c2 == 0); \
}

static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) {
#ifdef USE_ASM_X86_64
    /* Reduce 512 bits into 385. */
    uint64_t m0, m1, m2, m3, m4, m5, m6;
    uint64_t p0, p1, p2, p3, p4;
    uint64_t c;
    __asm__ __volatile__(
    "movq 32(%%rsi), %%r11\n"
    "movq 40(%%rsi), %%r12\n"
    "movq 48(%%rsi), %%r13\n"
    "movq 56(%%rsi), %%r14\n"
    "movq 0(%%rsi), %%r8\n"
    "xorq %%r10, %%r10\n"
    "addq 8(%%rsi), %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq %%rdx, %%r10\n"
    "addq 16(%%rsi), %%r10\n"
    "addq %%rax, %%r10\n"
    "addq %%rax, %%r10\n"
    "addq %%r11, %%r10\n"
    "xorq %%r10, %%r10\n"
    "addq 24(%%rsi), %%r8\n"
    "adcq %%rdx, %%r10\n"
    "addq %%r14, %%r10\n"
    : "=g"(m0), "=g"(m1), "=g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6)
    : "S"(l), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc");
    /* Reduce 385 bits into 258. */
    __asm__ __volatile__(
    "xorq %%r10, %%r10\n"
    "adcq %%rdx, %%r10\n"
    "adcq %%rdx, %%r10\n"
    "addq %%rax, %%r10\n"
    "addq %%rax, %%r10\n"
    "addq %%r11, %%r10\n"
    : "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4)
    : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc");
    /* Reduce 258 bits into 256. */
    __asm__ __volatile__(
    "movq %%rax, 0(%q6)\n"
    "movq %%r8, 8(%q6)\n"
    "movq %%r9, 16(%q6)\n"
    "movq %%r8, 24(%q6)\n"
    : "=g"(c)
    : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "cc", "memory");
#else
    secp256k1_uint128 c128;
    uint64_t c, c0, c1, c2;
    uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];
    uint64_t m0, m1, m2, m3, m4, m5, m6;
    uint64_t p0, p1, p2, p3, p4;

    /* Reduce 512 bits into 385: m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */
    c0 = l[0]; c1 = 0; c2 = 0;

    /* Reduce 385 bits into 258: p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */
    c0 = m0; c1 = 0; c2 = 0;

    /* Reduce 258 bits into 256: r[0..3] = p[0..3] + p4 * SECP256K1_N_C. */
    secp256k1_u128_from_u64(&c128, p0);
    secp256k1_u128_accum_mul(&c128, SECP256K1_N_C_0, p4);
    r->d[0] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
    secp256k1_u128_accum_u64(&c128, p1);
    secp256k1_u128_accum_mul(&c128, SECP256K1_N_C_1, p4);
    r->d[1] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
    secp256k1_u128_accum_u64(&c128, p2);
    secp256k1_u128_accum_u64(&c128, p4);
    r->d[2] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
    secp256k1_u128_accum_u64(&c128, p3);
    r->d[3] = secp256k1_u128_to_u64(&c128);
    c = secp256k1_u128_hi_u64(&c128);
#endif

    /* Final reduction of r. */
    secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}
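/* Informative note on the reduction strategy: write the 512-bit input as
 * l = low + 2^256 * high. Since 2^256 = n + SECP256K1_N_C, we have
 * 2^256 * high == SECP256K1_N_C * high (mod n), and SECP256K1_N_C is only about
 * 129 bits, so each folding step shrinks the value (512 -> 385 -> 258 bits).
 * After the last fold the result fits in 256 bits plus a small carry c, and the
 * final call to secp256k1_scalar_reduce performs the remaining conditional
 * subtraction of n.
 */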
static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b) {
#ifdef USE_ASM_X86_64
    /* l[0..7] = a[0..3] * b[0..3]. */
    __asm__ __volatile__(
    "movq 0(%%rdi), %%r15\n"
    "movq 8(%%rdi), %%rbx\n"
    "movq 16(%%rdi), %%rcx\n"
    "movq 0(%%rdx), %%r11\n"
    "movq 8(%%rdx), %%r12\n"
    "movq 16(%%rdx), %%r13\n"
    "movq 24(%%rdx), %%r14\n"
    "movq %%r15, %%rax\n"
    "movq %%rax, 0(%%rsi)\n"
    "xorq %%r10, %%r10\n"
    "movq %%r15, %%rax\n"
    "movq %%rbx, %%rax\n"
    "movq %%r8, 8(%%rsi)\n"
    "movq %%r15, %%rax\n"
    "adcq %%rdx, %%r10\n"
    "movq %%rbx, %%rax\n"
    "adcq %%rdx, %%r10\n"
    "movq %%rcx, %%rax\n"
    "adcq %%rdx, %%r10\n"
    "movq %%r9, 16(%%rsi)\n"
    "movq %%r15, %%rax\n"
    "addq %%rax, %%r10\n"
    "movq 24(%%rdi), %%r15\n"
    "movq %%rbx, %%rax\n"
    "addq %%rax, %%r10\n"
    "movq %%rcx, %%rax\n"
    "addq %%rax, %%r10\n"
    "movq %%r15, %%rax\n"
    "addq %%rax, %%r10\n"
    "movq %%r10, 24(%%rsi)\n"
    "xorq %%r10, %%r10\n"
    "movq %%rbx, %%rax\n"
    "movq %%rcx, %%rax\n"
    "movq %%r15, %%rax\n"
    "movq %%r8, 32(%%rsi)\n"
    "movq %%rcx, %%rax\n"
    "adcq %%rdx, %%r10\n"
    "movq %%r15, %%rax\n"
    "adcq %%rdx, %%r10\n"
    "movq %%r9, 40(%%rsi)\n"
    "movq %%r15, %%rax\n"
    "addq %%rax, %%r10\n"
    "movq %%r10, 48(%%rsi)\n"
    "movq %%r8, 56(%%rsi)\n"
    : "rax", "rbx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc", "memory");
#endif
}
static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    uint64_t l[8];
    secp256k1_scalar_mul_512(l, a, b);
    secp256k1_scalar_reduce_512(r, l);
}
static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
    int ret;
    VERIFY_CHECK(n > 0);
    VERIFY_CHECK(n < 16);

    ret = r->d[0] & ((1 << n) - 1);
    r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
    r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
    r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
    r->d[3] = (r->d[3] >> n);
    return ret;
}
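/* Informative example: the return value is the n low bits that were shifted
 * out. If r currently holds 13 (binary 1101), secp256k1_scalar_shr_int(&r, 2)
 * returns 1 (the dropped bits 01) and leaves r == 3. n must stay in [1,15].
 */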
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
    return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
}
SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
    uint64_t l[8];
    unsigned int shiftlimbs;
    unsigned int shiftlow;
    unsigned int shifthigh;
    VERIFY_CHECK(shift >= 256);

    secp256k1_scalar_mul_512(l, a, b);
    shiftlimbs = shift >> 6;
    shiftlow = shift & 0x3F;
    shifthigh = 64 - shiftlow;
    r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
    secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
}
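/* Informative note: this computes round((a * b) / 2^shift) for shift >= 256.
 * The ternaries select the window of the 512-bit product that starts at bit
 * `shift`, and the final secp256k1_scalar_cadd_bit adds bit (shift-1) of the
 * product, i.e. the bit just below the cut, which rounds the truncated
 * quotient to the nearest integer.
 */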
static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
    uint64_t mask0, mask1;
    volatile int vflag = flag;
    SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
    mask0 = vflag + ~((uint64_t)0);
    mask1 = ~mask0;
    r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
    r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);
    r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
    r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
}
static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) {
    const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4];

    /* The output from secp256k1_modinv64{_var} should be normalized to range [0,modulus), and
     * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4).
     */
    VERIFY_CHECK(a0 >> 62 == 0);
    VERIFY_CHECK(a1 >> 62 == 0);
    VERIFY_CHECK(a2 >> 62 == 0);
    VERIFY_CHECK(a3 >> 62 == 0);
    VERIFY_CHECK(a4 >> 8 == 0);

    r->d[0] = a0      | a1 << 62;
    r->d[1] = a1 >> 2 | a2 << 60;
    r->d[2] = a2 >> 4 | a3 << 58;
    r->d[3] = a3 >> 6 | a4 << 56;
}
static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
    const uint64_t M62 = UINT64_MAX >> 2;
    const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];

    r->v[0] =  a0                   & M62;
    r->v[1] = (a0 >> 62 | a1 <<  2) & M62;
    r->v[2] = (a1 >> 60 | a2 <<  4) & M62;
    r->v[3] = (a2 >> 58 | a3 <<  6) & M62;
    r->v[4] =  a3 >> 56;
}

static const secp256k1_modinv64_modinfo secp256k1_const_modinfo_scalar = {
    {{0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, -0x15LL, 0, 256}},
    0x34F20099AA774EC1LL
};
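/* Informative note: modular inversion is delegated to the modinv64 module,
 * which works on a signed 5x62-bit representation: the value is
 * v[0] + v[1]*2^62 + v[2]*2^124 + v[3]*2^186 + v[4]*2^248, and limbs of the
 * modulus entry above may be negative (e.g. -0x15). The conversion helpers
 * above repack the 4x64 scalar limbs into that form and back; the trailing
 * constant in secp256k1_const_modinfo_scalar is the inverse of the group
 * order modulo 2^62, used internally by modinv64.
 */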
static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
    secp256k1_modinv64_signed62 s;
#ifdef VERIFY
    int zero_in = secp256k1_scalar_is_zero(x);
#endif
    secp256k1_scalar_to_signed62(&s, x);
    secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
    secp256k1_scalar_from_signed62(r, &s);

#ifdef VERIFY
    VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}

static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
    secp256k1_modinv64_signed62 s;
#ifdef VERIFY
    int zero_in = secp256k1_scalar_is_zero(x);
#endif
    secp256k1_scalar_to_signed62(&s, x);
    secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
    secp256k1_scalar_from_signed62(r, &s);

#ifdef VERIFY
    VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}

SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
    return !(a->d[0] & 1);
}

#endif /* SECP256K1_SCALAR_REPR_IMPL_H */