48 out[0] =
a[0] + b[0] ; c = (
out[0] >> 51);
out[0] &= reduce_mask_51;
49 out[1] =
a[1] + b[1] + c; c = (
out[1] >> 51);
out[1] &= reduce_mask_51;
50 out[2] =
a[2] + b[2] + c; c = (
out[2] >> 51);
out[2] &= reduce_mask_51;
51 out[3] =
a[3] + b[3] + c; c = (
out[3] >> 51);
out[3] &= reduce_mask_51;
52 out[4] =
a[4] + b[4] + c; c = (
out[4] >> 51);
out[4] &= reduce_mask_51;
/* Subtraction-bias constants: small multiples of the field prime
 * p = 2^255 - 19, expressed per 51-bit limb (limb 0 differs from
 * limbs 1..4).  Adding 2*p (or 4*p, for inputs that may already be
 * up to ~2p) before subtracting keeps every limb non-negative, so
 * the unsigned arithmetic never underflows.
 *   2*p = { twoP0,  twoP1234, twoP1234, twoP1234, twoP1234 }
 *   4*p = { fourP0, fourP1234, fourP1234, fourP1234, fourP1234 } */
static const uint64_t twoP0     = 0x0fffffffffffda; /* 2*(2^51 - 19) */
static const uint64_t twoP1234  = 0x0ffffffffffffe; /* 2*(2^51 -  1) */
static const uint64_t fourP0    = 0x1fffffffffffb4; /* 4*(2^51 - 19) */
static const uint64_t fourP1234 = 0x1ffffffffffffc; /* 4*(2^51 -  1) */
65 out[0] =
a[0] + twoP0 - b[0];
66 out[1] =
a[1] + twoP1234 - b[1];
67 out[2] =
a[2] + twoP1234 - b[2];
68 out[3] =
a[3] + twoP1234 - b[3];
69 out[4] =
a[4] + twoP1234 - b[4];
75 out[0] =
a[0] + fourP0 - b[0];
76 out[1] =
a[1] + fourP1234 - b[1];
77 out[2] =
a[2] + fourP1234 - b[2];
78 out[3] =
a[3] + fourP1234 - b[3];
79 out[4] =
a[4] + fourP1234 - b[4];
85 out[0] =
a[0] + fourP0 - b[0] ; c = (
out[0] >> 51);
out[0] &= reduce_mask_51;
86 out[1] =
a[1] + fourP1234 - b[1] + c; c = (
out[1] >> 51);
out[1] &= reduce_mask_51;
87 out[2] =
a[2] + fourP1234 - b[2] + c; c = (
out[2] >> 51);
out[2] &= reduce_mask_51;
88 out[3] =
a[3] + fourP1234 - b[3] + c; c = (
out[3] >> 51);
out[3] &= reduce_mask_51;
89 out[4] =
a[4] + fourP1234 - b[4] + c; c = (
out[4] >> 51);
out[4] &= reduce_mask_51;
97 out[0] = twoP0 -
a[0] ; c = (
out[0] >> 51);
out[0] &= reduce_mask_51;
98 out[1] = twoP1234 -
a[1] + c; c = (
out[1] >> 51);
out[1] &= reduce_mask_51;
99 out[2] = twoP1234 -
a[2] + c; c = (
out[2] >> 51);
out[2] &= reduce_mask_51;
100 out[3] = twoP1234 -
a[3] + c; c = (
out[3] >> 51);
out[3] &= reduce_mask_51;
101 out[4] = twoP1234 -
a[4] + c; c = (
out[4] >> 51);
out[4] &= reduce_mask_51;
/* Interior of a field multiplication (mod 2^255 - 19) on 5x51-bit limbs.
 * NOTE(review): this chunk is garbled -- original file line numbers are
 * fused into the code text and several lines (the limb loads, the
 * #else/#endif pairs, and the r1..r4 *= 19 pre-scaling) are missing
 * from view, so the code is left byte-identical. */
108 #if !defined(HAVE_NATIVE_UINT128) 112 uint64_t r0,r1,r2,r3,r4,s0,s1,s2,s3,s4,c;
/* Low half of the schoolbook product: t[i] = sum over j+k == i of
 * r_j * s_k, accumulated in 128-bit temporaries. */
126 #if defined(HAVE_NATIVE_UINT128) 127 t[0] = ((uint128_t) r0) * s0;
128 t[1] = ((uint128_t) r0) * s1 + ((uint128_t) r1) * s0;
129 t[2] = ((uint128_t) r0) * s2 + ((uint128_t) r2) * s0 + ((uint128_t) r1) * s1;
130 t[3] = ((uint128_t) r0) * s3 + ((uint128_t) r3) * s0 + ((uint128_t) r1) * s2 + ((uint128_t) r2) * s1;
131 t[4] = ((uint128_t) r0) * s4 + ((uint128_t) r4) * s0 + ((uint128_t) r3) * s1 + ((uint128_t) r1) * s3 + ((uint128_t) r2) * s2;
/* Portable variant of the same five products, using 64x64->128 macros
 * (mul64x64_128 / add128) when no native 128-bit type exists. */
133 mul64x64_128(t[0], r0, s0)
134 mul64x64_128(t[1], r0, s1) mul64x64_128(mul, r1, s0) add128(t[1], mul)
135 mul64x64_128(t[2], r0, s2) mul64x64_128(mul, r2, s0) add128(t[2], mul) mul64x64_128(mul, r1, s1) add128(t[2], mul)
136 mul64x64_128(t[3], r0, s3) mul64x64_128(mul, r3, s0) add128(t[3], mul) mul64x64_128(mul, r1, s2) add128(t[3], mul) mul64x64_128(mul, r2, s1) add128(t[3], mul)
137 mul64x64_128(t[4], r0, s4) mul64x64_128(mul, r4, s0) add128(t[4], mul) mul64x64_128(mul, r3, s1) add128(t[4], mul) mul64x64_128(mul, r1, s3) add128(t[4], mul) mul64x64_128(mul, r2, s2) add128(t[4], mul)
/* High half folded in: terms with j+k == i+5 wrap around multiplied by
 * 19 because 2^255 = 19 (mod p).  Presumably r1..r4 were scaled by 19
 * just before this point (that line is not in view) -- confirm. */
145 #if defined(HAVE_NATIVE_UINT128) 146 t[0] += ((uint128_t) r4) * s1 + ((uint128_t) r1) * s4 + ((uint128_t) r2) * s3 + ((uint128_t) r3) * s2;
147 t[1] += ((uint128_t) r4) * s2 + ((uint128_t) r2) * s4 + ((uint128_t) r3) * s3;
148 t[2] += ((uint128_t) r4) * s3 + ((uint128_t) r3) * s4;
149 t[3] += ((uint128_t) r4) * s4;
151 mul64x64_128(mul, r4, s1) add128(t[0], mul) mul64x64_128(mul, r1, s4) add128(t[0], mul) mul64x64_128(mul, r2, s3) add128(t[0], mul) mul64x64_128(mul, r3, s2) add128(t[0], mul)
152 mul64x64_128(mul, r4, s2) add128(t[1], mul) mul64x64_128(mul, r2, s4) add128(t[1], mul) mul64x64_128(mul, r3, s3) add128(t[1], mul)
153 mul64x64_128(mul, r4, s3) add128(t[2], mul) mul64x64_128(mul, r3, s4) add128(t[2], mul)
154 mul64x64_128(mul, r4, s4) add128(t[3], mul)
/* Carry chain: keep the low 51 bits of each t[i], push the excess into
 * the next limb, and wrap the final carry back into r0 as c * 19. */
158 r0 = lo128(t[0]) & reduce_mask_51; shr128(c, t[0], 51);
159 add128_64(t[1], c) r1 = lo128(t[1]) & reduce_mask_51; shr128(c, t[1], 51);
160 add128_64(t[2], c) r2 = lo128(t[2]) & reduce_mask_51; shr128(c, t[2], 51);
161 add128_64(t[3], c) r3 = lo128(t[3]) & reduce_mask_51; shr128(c, t[3], 51);
162 add128_64(t[4], c) r4 = lo128(t[4]) & reduce_mask_51; shr128(c, t[4], 51);
163 r0 += c * 19; c = r0 >> 51; r0 = r0 & reduce_mask_51;
/* Body of a "noinline" wrapper: simply forwards to curve25519_mul
 * (the wrapper's signature is not in view in this chunk). */
175 curve25519_mul(out, in2, in);
/* Interior of a field squaring (the repeated-squaring variant, judging
 * by the shl128-based reduction below).  NOTE(review): garbled chunk --
 * original line numbers are fused in and the declarations of d0..d4 and
 * d419 are missing from view.  Presumably d0 = r0*2, d1 = r1*2,
 * d2 = r2*2*19, d419 = r4*19, d4 = d419*2, per the usual donna-style
 * squaring -- confirm against the full file.  Code left byte-identical. */
181 #if !defined(HAVE_NATIVE_UINT128) 201 #if defined(HAVE_NATIVE_UINT128) 202 t[0] = ((uint128_t) r0) * r0 + ((uint128_t) d4) * r1 + (((uint128_t) d2) * (r3 ));
203 t[1] = ((uint128_t) d0) * r1 + ((uint128_t) d4) * r2 + (((uint128_t) r3) * (r3 * 19));
204 t[2] = ((uint128_t) d0) * r2 + ((uint128_t) r1) * r1 + (((uint128_t) d4) * (r3 ));
205 t[3] = ((uint128_t) d0) * r3 + ((uint128_t) d1) * r2 + (((uint128_t) r4) * (d419 ));
206 t[4] = ((uint128_t) d0) * r4 + ((uint128_t) d1) * r3 + (((uint128_t) r2) * (r2 ));
/* Same five squaring terms via the portable 64x64->128 macros. */
208 mul64x64_128(t[0], r0, r0) mul64x64_128(mul, d4, r1) add128(t[0], mul) mul64x64_128(mul, d2, r3) add128(t[0], mul)
209 mul64x64_128(t[1], d0, r1) mul64x64_128(mul, d4, r2) add128(t[1], mul) mul64x64_128(mul, r3, r3 * 19) add128(t[1], mul)
210 mul64x64_128(t[2], d0, r2) mul64x64_128(mul, r1, r1) add128(t[2], mul) mul64x64_128(mul, d4, r3) add128(t[2], mul)
211 mul64x64_128(t[3], d0, r3) mul64x64_128(mul, d1, r2) add128(t[3], mul) mul64x64_128(mul, r4, d419) add128(t[3], mul)
212 mul64x64_128(t[4], d0, r4) mul64x64_128(mul, d1, r3) add128(t[4], mul) mul64x64_128(mul, r2, r2) add128(t[4], mul)
/* Reduction: shl128(c, t, 13) extracts bits 51..114 of t, i.e. the
 * carry (t >> 51) as a 64-bit value, which is added to the next limb;
 * the top carry wraps to limb 0 multiplied by 19, then one more plain
 * 64-bit carry chain normalizes every limb to 51 bits. */
215 r0 = lo128(t[0]) & reduce_mask_51;
216 r1 = lo128(t[1]) & reduce_mask_51; shl128(c, t[0], 13); r1 += c;
217 r2 = lo128(t[2]) & reduce_mask_51; shl128(c, t[1], 13); r2 += c;
218 r3 = lo128(t[3]) & reduce_mask_51; shl128(c, t[2], 13); r3 += c;
219 r4 = lo128(t[4]) & reduce_mask_51; shl128(c, t[3], 13); r4 += c;
220 shl128(c, t[4], 13); r0 += c * 19;
221 c = r0 >> 51; r0 &= reduce_mask_51;
222 r1 += c ; c = r1 >> 51; r1 &= reduce_mask_51;
223 r2 += c ; c = r2 >> 51; r2 &= reduce_mask_51;
224 r3 += c ; c = r3 >> 51; r3 &= reduce_mask_51;
225 r4 += c ; c = r4 >> 51; r4 &= reduce_mask_51;
/* Interior of a single field squaring.  Same product terms as the
 * squaring above but reduced with shr128 instead of the shl128 trick.
 * NOTE(review): garbled chunk -- original line numbers fused in and the
 * d0..d4/d419 declarations are missing from view (presumably d0 = r0*2,
 * d1 = r1*2, d2 = r2*2*19, d419 = r4*19, d4 = d419*2 -- confirm).
 * Code left byte-identical. */
238 #if !defined(HAVE_NATIVE_UINT128) 257 #if defined(HAVE_NATIVE_UINT128) 258 t[0] = ((uint128_t) r0) * r0 + ((uint128_t) d4) * r1 + (((uint128_t) d2) * (r3 ));
259 t[1] = ((uint128_t) d0) * r1 + ((uint128_t) d4) * r2 + (((uint128_t) r3) * (r3 * 19));
260 t[2] = ((uint128_t) d0) * r2 + ((uint128_t) r1) * r1 + (((uint128_t) d4) * (r3 ));
261 t[3] = ((uint128_t) d0) * r3 + ((uint128_t) d1) * r2 + (((uint128_t) r4) * (d419 ));
262 t[4] = ((uint128_t) d0) * r4 + ((uint128_t) d1) * r3 + (((uint128_t) r2) * (r2 ));
/* Portable 64x64->128 macro variant of the same five terms. */
264 mul64x64_128(t[0], r0, r0) mul64x64_128(mul, d4, r1) add128(t[0], mul) mul64x64_128(mul, d2, r3) add128(t[0], mul)
265 mul64x64_128(t[1], d0, r1) mul64x64_128(mul, d4, r2) add128(t[1], mul) mul64x64_128(mul, r3, r3 * 19) add128(t[1], mul)
266 mul64x64_128(t[2], d0, r2) mul64x64_128(mul, r1, r1) add128(t[2], mul) mul64x64_128(mul, d4, r3) add128(t[2], mul)
267 mul64x64_128(t[3], d0, r3) mul64x64_128(mul, d1, r2) add128(t[3], mul) mul64x64_128(mul, r4, d419) add128(t[3], mul)
268 mul64x64_128(t[4], d0, r4) mul64x64_128(mul, d1, r3) add128(t[4], mul) mul64x64_128(mul, r2, r2) add128(t[4], mul)
/* Carry chain: keep low 51 bits of each t[i], carry the rest upward,
 * wrap the final carry into r0 as c * 19 (2^255 = 19 mod p). */
271 r0 = lo128(t[0]) & reduce_mask_51; shr128(c, t[0], 51);
272 add128_64(t[1], c) r1 = lo128(t[1]) & reduce_mask_51; shr128(c, t[1], 51);
273 add128_64(t[2], c) r2 = lo128(t[2]) & reduce_mask_51; shr128(c, t[2], 51);
274 add128_64(t[3], c) r3 = lo128(t[3]) & reduce_mask_51; shr128(c, t[3], 51);
275 add128_64(t[4], c) r4 = lo128(t[4]) & reduce_mask_51; shr128(c, t[4], 51);
276 r0 += c * 19; c = r0 >> 51; r0 = r0 & reduce_mask_51;
/* Unpack a 32-byte little-endian value into 5x51-bit limbs.
 * NOTE(review): garbled chunk -- the return type/qualifiers, the
 * big-endian branch, and the loads of x0..x3 (presumably the four
 * little-endian 64-bit words of `in`) are missing from view.
 * Code left byte-identical. */
288 curve25519_expand(
bignum25519 out,
const unsigned char *in) {
/* Runtime endianness probe: s == 1 iff the host is little-endian. */
289 static const union {
uint8_t b[2];
uint16_t s; } endian_check = {{1,0}};
292 if (endian_check.s == 1) {
/* The fused macro below assembles a 64-bit little-endian load from 8
 * bytes; the out[] lines then split the 256-bit input into 51-bit
 * limbs, stitching bits across the 64-bit word boundaries (13, 26, 39
 * bit shifts) and masking off the top bit of the last limb. */
299 ((((uint64_t)in[s + 0]) ) | \ 300 (((uint64_t)in[s + 1]) << 8) | \ 301 (((uint64_t)in[s + 2]) << 16) | \ 302 (((uint64_t)in[s + 3]) << 24) | \ 303 (((uint64_t)in[s + 4]) << 32) | \ 304 (((uint64_t)in[s + 5]) << 40) | \ 305 (((uint64_t)in[s + 6]) << 48) | \ 306 (((uint64_t)in[s + 7]) << 56)) 314 out[0] = x0 & reduce_mask_51; x0 = (x0 >> 51) | (x1 << 13);
315 out[1] = x0 & reduce_mask_51; x1 = (x1 >> 38) | (x2 << 26);
316 out[2] = x1 & reduce_mask_51; x2 = (x2 >> 25) | (x3 << 39);
317 out[3] = x2 & reduce_mask_51; x3 = (x3 >> 12);
318 out[4] = x3 & reduce_mask_51;
/* Pack a field element back into 32 little-endian bytes, fully reduced
 * mod p.  NOTE(review): garbled chunk -- the copy of `input` into t[],
 * the invocations of the carry macros, and the final write51 calls are
 * missing from view.  Code left byte-identical. */
325 curve25519_contract(
unsigned char *out,
const bignum25519 input) {
/* The fused macros below: curve25519_contract_carry propagates 51-bit
 * carries up the limbs; the _full variant also wraps the top carry back
 * as *19; the _final variant just masks the top limb.  The t[i] +=
 * lines bias each limb by (2^51 - 19) resp. (2^51 - 1), i.e. add p,
 * so the subsequent carry pass yields a canonical representative
 * without branches (following steps not in view -- confirm against
 * the full file). */
335 #define curve25519_contract_carry() \ 336 t[1] += t[0] >> 51; t[0] &= reduce_mask_51; \ 337 t[2] += t[1] >> 51; t[1] &= reduce_mask_51; \ 338 t[3] += t[2] >> 51; t[2] &= reduce_mask_51; \ 339 t[4] += t[3] >> 51; t[3] &= reduce_mask_51; 341 #define curve25519_contract_carry_full() curve25519_contract_carry() \ 342 t[0] += 19 * (t[4] >> 51); t[4] &= reduce_mask_51; 344 #define curve25519_contract_carry_final() curve25519_contract_carry() \ 345 t[4] &= reduce_mask_51; 356 t[0] += (reduce_mask_51 + 1) - 19;
357 t[1] += (reduce_mask_51 + 1) - 1;
358 t[2] += (reduce_mask_51 + 1) - 1;
359 t[3] += (reduce_mask_51 + 1) - 1;
360 t[4] += (reduce_mask_51 + 1) - 1;
/* write51full serializes limb n (stitched with limb n+1 across the
 * 51-bit boundary) as 8 little-endian bytes.  The trailing fused text
 * starts the next routine: nb/b are all-ones/all-zero masks derived
 * from `flag` for a constant-time conditional move. */
365 #define write51full(n,shift) \ 366 f = ((t[n] >> shift) | (t[n+1] << (51 - shift))); \ 367 for (i = 0; i < 8; i++, f >>= 8) *out++ = (unsigned char)f; 368 #define write51(n) write51full(n,13*n) 375 #if !defined(ED25519_GCC_64BIT_CHOOSE) 380 const uint64_t nb = flag - 1, b = ~nb;
383 outq[0] = (outq[0] & nb) | (inq[0] & b);
384 outq[1] = (outq[1] & nb) | (inq[1] & b);
385 outq[2] = (outq[2] & nb) | (inq[2] & b);
386 outq[3] = (outq[3] & nb) | (inq[3] & b);
387 outq[4] = (outq[4] & nb) | (inq[4] & b);
388 outq[5] = (outq[5] & nb) | (inq[5] & b);
389 outq[6] = (outq[6] & nb) | (inq[6] & b);
390 outq[7] = (outq[7] & nb) | (inq[7] & b);
391 outq[8] = (outq[8] & nb) | (inq[8] & b);
392 outq[9] = (outq[9] & nb) | (inq[9] & b);
393 outq[10] = (outq[10] & nb) | (inq[10] & b);
394 outq[11] = (outq[11] & nb) | (inq[11] & b);
403 x0 = swap & (
a[0] ^ b[0]);
a[0] ^= x0; b[0] ^= x0;
404 x1 = swap & (
a[1] ^ b[1]);
a[1] ^= x1; b[1] ^= x1;
405 x2 = swap & (
a[2] ^ b[2]);
a[2] ^= x2; b[2] ^= x2;
406 x3 = swap & (
a[3] ^ b[3]);
a[3] ^= x3; b[3] ^= x3;
407 x4 = swap & (
a[4] ^ b[4]);
a[4] ^= x4; b[4] ^= x4;
412 #define ED25519_64BIT_TABLES
#define curve25519_square(r, n)
#define curve25519_add_after_basic
mdb_size_t count(MDB_cursor *cur)
unsigned __int64 uint64_t
#define curve25519_mul_noinline
const GenericPointer< typename T::ValueType > T2 T::AllocatorType & a
#define curve25519_contract_carry_final()
#define curve25519_contract_carry_full()