Electroneum
scalar_impl.h
/***********************************************************************
 * Copyright (c) 2014 Pieter Wuille                                    *
 * Distributed under the MIT software license, see the accompanying    *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
 ***********************************************************************/

#ifndef SECP256K1_SCALAR_IMPL_H
#define SECP256K1_SCALAR_IMPL_H

#ifdef VERIFY
#include <string.h>
#endif

#include "scalar.h"
#include "util.h"

#if defined(EXHAUSTIVE_TEST_ORDER)
#include "scalar_low_impl.h"
#elif defined(SECP256K1_WIDEMUL_INT128)
#include "scalar_4x64_impl.h"
#elif defined(SECP256K1_WIDEMUL_INT64)
#include "scalar_8x32_impl.h"
#else
#error "Please select wide multiplication implementation"
#endif

static const secp256k1_scalar secp256k1_scalar_one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
static const secp256k1_scalar secp256k1_scalar_zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);

static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned char *bin) {
    int overflow;
    secp256k1_scalar_set_b32(r, bin, &overflow);
    return (!overflow) & (!secp256k1_scalar_is_zero(r));
}
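
/* Illustrative usage sketch (added as a documentation aid; not part of the
 * upstream file; the buffer name seckey32 is hypothetical). A caller that
 * wants to validate a candidate 32-byte secret key could write:
 *
 *     secp256k1_scalar sec;
 *     if (!secp256k1_scalar_set_b32_seckey(&sec, seckey32)) {
 *         reject the key: seckey32 encodes zero or a value >= the group order n
 *     }
 *
 * The return value is 1 only if the decoded scalar is nonzero and did not
 * overflow the group order.
 */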

#if defined(EXHAUSTIVE_TEST_ORDER)
/* Begin of section generated by sage/gen_exhaustive_groups.sage. */
#  if EXHAUSTIVE_TEST_ORDER == 7
#    define EXHAUSTIVE_TEST_LAMBDA 2
#  elif EXHAUSTIVE_TEST_ORDER == 13
#    define EXHAUSTIVE_TEST_LAMBDA 9
#  elif EXHAUSTIVE_TEST_ORDER == 199
#    define EXHAUSTIVE_TEST_LAMBDA 92
#  else
#    error No known lambda for the specified exhaustive test group order.
#  endif
/* End of section generated by sage/gen_exhaustive_groups.sage. */

/* Split k into r1 and r2 such that r1 + r2 * lambda == k (mod n). For the
 * tiny exhaustive-test group orders the parts do not need to be small; any
 * valid decomposition will do. */
static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
    *r2 = (*k + 5) % EXHAUSTIVE_TEST_ORDER;
    *r1 = (*k + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
}
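
/*
 * Worked example (illustrative, not from the original source): with
 * EXHAUSTIVE_TEST_ORDER == 13 and EXHAUSTIVE_TEST_LAMBDA == 9, take k = 5.
 * Then r2 = (5 + 5) % 13 = 10 and r1 = (5 + (13 - 10) * 9) % 13 = 32 % 13 = 6.
 * Check: r1 + lambda * r2 = 6 + 9 * 10 = 96 == 5 == k (mod 13).
 */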
#else

/*
 * secp256k1 has an efficiently computable endomorphism: lambda * (x, y) == (beta * x, y)
 * for every point on the curve, where lambda is a cube root of unity modulo the group
 * order n, given below (see the discussion of lambda and beta that follows).
 */
static const secp256k1_scalar secp256k1_const_lambda = SECP256K1_SCALAR_CONST(
    0x5363AD4CUL, 0xC05C30E0UL, 0xA5261C02UL, 0x8812645AUL,
    0x122E22EAUL, 0x20816678UL, 0xDF02967CUL, 0x1B23BD72UL
);

#ifdef VERIFY
static void secp256k1_scalar_split_lambda_verify(const secp256k1_scalar *r1, const secp256k1_scalar *r2, const secp256k1_scalar *k);
#endif

/*
 * Both lambda and beta are primitive cube roots of unity. That is, lambda^3 == 1 mod n and
 * beta^3 == 1 mod p, where n is the curve order and p is the field order.
 *
 * Furthermore, because (X^3 - 1) = (X - 1)(X^2 + X + 1), the primitive cube roots of unity are
 * roots of X^2 + X + 1. Therefore lambda^2 + lambda == -1 mod n and beta^2 + beta == -1 mod p.
 * (The other primitive cube roots of unity are lambda^2 and beta^2 respectively.)
 *
 * Let l = -1/2 + i*sqrt(3)/2, the complex root of X^2 + X + 1. We can define a ring
 * homomorphism phi : Z[l] -> Z_n where phi(a + b*l) == a + b*lambda mod n. The kernel of phi
 * is a lattice over Z[l] (considering Z[l] as a Z-module). This lattice is generated by a
 * reduced basis {a1 + b1*l, a2 + b2*l} where
 *
 * - a1 =      {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
 * - b1 =     -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3}
 * - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8}
 * - b2 =      {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
 *
 * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm
 * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1
 * and k2 are small in absolute value.
 *
 * The algorithm computes c1 = round(b2 * k / n) and c2 = round((-b1) * k / n), and gives
 * k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and
 * compute r2 = k2 mod n, and r1 = k1 mod n = (k - r2 * lambda) mod n, avoiding the need for
 * the constants a1 and a2.
 *
 * g1, g2 are precomputed constants used to replace division with a rounded multiplication
 * when decomposing the scalar for an endomorphism-based point multiplication.
 *
 * The possibility of using precomputed estimates is mentioned in "Guide to Elliptic Curve
 * Cryptography" (Hankerson, Menezes, Vanstone) in section 3.5.
 *
 * The derivation is described in the paper "Efficient Software Implementation of Public-Key
 * Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez),
 * Section 4.3 (here we use a somewhat higher-precision estimate):
 * d = a1*b2 - b1*a2
 * g1 = round(2^384 * b2/d)
 * g2 = round(2^384 * (-b1)/d)
 *
 * (Note that d is also equal to the curve order, n, here because [a1,b1] and [a2,b2]
 * can be found as outputs of the Extended Euclidean Algorithm on inputs n and lambda.)
 *
 * The function below splits k into r1 and r2, such that
 * - r1 + lambda * r2 == k (mod n)
 * - either r1 < 2^128 or -r1 mod n < 2^128
 * - either r2 < 2^128 or -r2 mod n < 2^128
 *
 * See proof below.
 */
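
/*
 * Added documentation aid: the code below implements the decomposition as
 *   c1 = round(k*g1 / 2^384)             (approximates round(k*b2 / n))
 *   c2 = round(k*g2 / 2^384)             (approximates round(k*(-b1) / n))
 *   r2 = (c1*(-b1) + c2*(-b2)) mod n  ==  k2 mod n
 *   r1 = (k - r2*lambda) mod n        ==  k1 mod n
 */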
static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
    secp256k1_scalar c1, c2;
    static const secp256k1_scalar minus_b1 = SECP256K1_SCALAR_CONST(
        0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL,
        0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL
    );
    static const secp256k1_scalar minus_b2 = SECP256K1_SCALAR_CONST(
        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL,
        0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL
    );
    static const secp256k1_scalar g1 = SECP256K1_SCALAR_CONST(
        0x3086D221UL, 0xA7D46BCDUL, 0xE86C90E4UL, 0x9284EB15UL,
        0x3DAA8A14UL, 0x71E8CA7FUL, 0xE893209AUL, 0x45DBB031UL
    );
    static const secp256k1_scalar g2 = SECP256K1_SCALAR_CONST(
        0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL,
        0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL
    );
    VERIFY_CHECK(r1 != k);
    VERIFY_CHECK(r2 != k);
    /* these _var calls are constant time since the shift amount is constant */
    secp256k1_scalar_mul_shift_var(&c1, k, &g1, 384);
    secp256k1_scalar_mul_shift_var(&c2, k, &g2, 384);
    secp256k1_scalar_mul(&c1, &c1, &minus_b1);
    secp256k1_scalar_mul(&c2, &c2, &minus_b2);
    secp256k1_scalar_add(r2, &c1, &c2);
    secp256k1_scalar_mul(r1, r2, &secp256k1_const_lambda);
    secp256k1_scalar_negate(r1, r1);
    secp256k1_scalar_add(r1, r1, k);

#ifdef VERIFY
    secp256k1_scalar_split_lambda_verify(r1, r2, k);
#endif
}

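/*
 * Illustrative usage sketch (added as a documentation aid; not code from this
 * file): an endomorphism-based point multiplication k*P can use the split as
 *
 *     secp256k1_scalar r1, r2;
 *     secp256k1_scalar_split_lambda(&r1, &r2, &k);
 *
 * and then evaluate k*P as r1*P + r2*(lambda*P), where lambda*P is obtained
 * cheaply by multiplying the x coordinate of P by beta in the field. Since r1
 * and r2 are (up to negation) below 2^128, the two half-width multiplications
 * can be interleaved, roughly halving the number of point doublings.
 */
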
#ifdef VERIFY
/*
 * Proof for secp256k1_scalar_split_lambda's bounds.
 *
 * Let
 *  - epsilon1 = 2^256 * |g1/2^384 - b2/d|
 *  - epsilon2 = 2^256 * |g2/2^384 - (-b1)/d|
 *  - c1 = round(k*g1/2^384)
 *  - c2 = round(k*g2/2^384)
 *
 * Lemma 1: |c1 - k*b2/d| < 2^-1 + epsilon1
 *
 *    |c1 - k*b2/d|
 *  =
 *    |c1 - k*g1/2^384 + k*g1/2^384 - k*b2/d|
 * <= {triangle inequality}
 *    |c1 - k*g1/2^384| + |k*g1/2^384 - k*b2/d|
 *  =
 *    |c1 - k*g1/2^384| + k*|g1/2^384 - b2/d|
 *  < {rounding in c1 and 0 <= k < 2^256}
 *    2^-1 + 2^256 * |g1/2^384 - b2/d|
 *  = {definition of epsilon1}
 *    2^-1 + epsilon1
 *
 * Lemma 2: |c2 - k*(-b1)/d| < 2^-1 + epsilon2
 *
 *    |c2 - k*(-b1)/d|
 *  =
 *    |c2 - k*g2/2^384 + k*g2/2^384 - k*(-b1)/d|
 * <= {triangle inequality}
 *    |c2 - k*g2/2^384| + |k*g2/2^384 - k*(-b1)/d|
 *  =
 *    |c2 - k*g2/2^384| + k*|g2/2^384 - (-b1)/d|
 *  < {rounding in c2 and 0 <= k < 2^256}
 *    2^-1 + 2^256 * |g2/2^384 - (-b1)/d|
 *  = {definition of epsilon2}
 *    2^-1 + epsilon2
 *
 * Let
 *  - k1 = k - c1*a1 - c2*a2
 *  - k2 = - c1*b1 - c2*b2
 *
 * Lemma 3: |k1| < (a1 + a2 + 1)/2 < 2^128
 *
 *    |k1|
 *  = {definition of k1}
 *    |k - c1*a1 - c2*a2|
 *  = {(a1*b2 - b1*a2)/n = 1}
 *    |k*(a1*b2 - b1*a2)/n - c1*a1 - c2*a2|
 *  =
 *    |a1*(k*b2/n - c1) + a2*(k*(-b1)/n - c2)|
 * <= {triangle inequality}
 *    a1*|k*b2/n - c1| + a2*|k*(-b1)/n - c2|
 *  < {Lemma 1 and Lemma 2}
 *    a1*(2^-1 + epsilon1) + a2*(2^-1 + epsilon2)
 *  < {rounding up to an integer}
 *    (a1 + a2 + 1)/2
 *  < {rounding up to a power of 2}
 *    2^128
 *
 * Lemma 4: |k2| < (-b1 + b2)/2 + 1 < 2^128
 *
 *    |k2|
 *  = {definition of k2}
 *    |- c1*b1 - c2*b2|
 *  = {(b1*b2 - b1*b2)/n = 0}
 *    |k*(b1*b2 - b1*b2)/n - c1*b1 - c2*b2|
 *  =
 *    |b1*(k*b2/n - c1) + b2*(k*(-b1)/n - c2)|
 * <= {triangle inequality}
 *    (-b1)*|k*b2/n - c1| + b2*|k*(-b1)/n - c2|
 *  < {Lemma 1 and Lemma 2}
 *    (-b1)*(2^-1 + epsilon1) + b2*(2^-1 + epsilon2)
 *  < {rounding up to an integer}
 *    (-b1 + b2)/2 + 1
 *  < {rounding up to a power of 2}
 *    2^128
 *
 * Let
 *  - r2 = k2 mod n
 *  - r1 = k - r2*lambda mod n.
 *
 * Notice that r1 is defined such that r1 + r2 * lambda == k (mod n).
 *
 * Lemma 5: r1 == k1 mod n.
 *
 *    r1
 * == {definition of r1 and r2}
 *    k - k2*lambda
 * == {definition of k2}
 *    k - (- c1*b1 - c2*b2)*lambda
 * ==
 *    k + c1*b1*lambda + c2*b2*lambda
 * == {a1 + b1*lambda == 0 mod n and a2 + b2*lambda == 0 mod n}
 *    k - c1*a1 - c2*a2
 * == {definition of k1}
 *    k1
 *
 * From Lemma 3, Lemma 4, Lemma 5 and the definition of r2, we can conclude that
 *
 *  - either r1 < 2^128 or -r1 mod n < 2^128,
 *  - either r2 < 2^128 or -r2 mod n < 2^128.
 *
 * Q.E.D.
 */
static void secp256k1_scalar_split_lambda_verify(const secp256k1_scalar *r1, const secp256k1_scalar *r2, const secp256k1_scalar *k) {
    secp256k1_scalar s;
    unsigned char buf1[32];
    unsigned char buf2[32];

    /* (a1 + a2 + 1)/2 is 0xa2a8918ca85bafe22016d0b917e4dd77 */
    static const unsigned char k1_bound[32] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xa2, 0xa8, 0x91, 0x8c, 0xa8, 0x5b, 0xaf, 0xe2, 0x20, 0x16, 0xd0, 0xb9, 0x17, 0xe4, 0xdd, 0x77
    };

    /* (-b1 + b2)/2 + 1 is 0x8a65287bd47179fb2be08846cea267ed */
    static const unsigned char k2_bound[32] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x8a, 0x65, 0x28, 0x7b, 0xd4, 0x71, 0x79, 0xfb, 0x2b, 0xe0, 0x88, 0x46, 0xce, 0xa2, 0x67, 0xed
    };

    /* Check that r1 + lambda * r2 == k (mod n). */
    secp256k1_scalar_mul(&s, &secp256k1_const_lambda, r2);
    secp256k1_scalar_add(&s, &s, r1);
    VERIFY_CHECK(secp256k1_scalar_eq(&s, k));

    /* Check that either r1 or its negation is below the k1 bound. */
    secp256k1_scalar_negate(&s, r1);
    secp256k1_scalar_get_b32(buf1, r1);
    secp256k1_scalar_get_b32(buf2, &s);
    VERIFY_CHECK(secp256k1_memcmp_var(buf1, k1_bound, 32) < 0 || secp256k1_memcmp_var(buf2, k1_bound, 32) < 0);

    /* Check that either r2 or its negation is below the k2 bound. */
    secp256k1_scalar_negate(&s, r2);
    secp256k1_scalar_get_b32(buf1, r2);
    secp256k1_scalar_get_b32(buf2, &s);
    VERIFY_CHECK(secp256k1_memcmp_var(buf1, k2_bound, 32) < 0 || secp256k1_memcmp_var(buf2, k2_bound, 32) < 0);
}
#endif /* VERIFY */
#endif /* !defined(EXHAUSTIVE_TEST_ORDER) */

#endif /* SECP256K1_SCALAR_IMPL_H */