Bitcoin Core  0.20.99
P2P Digital Currency
scalar_impl.h
Go to the documentation of this file.
1 /**********************************************************************
2  * Copyright (c) 2014 Pieter Wuille *
3  * Distributed under the MIT software license, see the accompanying *
4  * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
5  **********************************************************************/
6 
7 #ifndef SECP256K1_SCALAR_IMPL_H
8 #define SECP256K1_SCALAR_IMPL_H
9 
10 #ifdef VERIFY
11 #include <string.h>
12 #endif
13 
14 #include "scalar.h"
15 #include "util.h"
16 
17 #if defined HAVE_CONFIG_H
18 #include "libsecp256k1-config.h"
19 #endif
20 
21 #if defined(EXHAUSTIVE_TEST_ORDER)
22 #include "scalar_low_impl.h"
23 #elif defined(SECP256K1_WIDEMUL_INT128)
24 #include "scalar_4x64_impl.h"
25 #elif defined(SECP256K1_WIDEMUL_INT64)
26 #include "scalar_8x32_impl.h"
27 #else
28 #error "Please select wide multiplication implementation"
29 #endif
30 
31 static const secp256k1_scalar secp256k1_scalar_one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
32 static const secp256k1_scalar secp256k1_scalar_zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
33 
34 #ifndef USE_NUM_NONE
36  unsigned char c[32];
38  secp256k1_num_set_bin(r, c, 32);
39 }
40 
43 #if defined(EXHAUSTIVE_TEST_ORDER)
44  static const unsigned char order[32] = {
45  0,0,0,0,0,0,0,0,
46  0,0,0,0,0,0,0,0,
47  0,0,0,0,0,0,0,0,
48  0,0,0,0,0,0,0,EXHAUSTIVE_TEST_ORDER
49  };
50 #else
51  static const unsigned char order[32] = {
52  0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
53  0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
54  0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
55  0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
56  };
57 #endif
58  secp256k1_num_set_bin(r, order, 32);
59 }
60 #endif
61 
62 static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned char *bin) {
63  int overflow;
64  secp256k1_scalar_set_b32(r, bin, &overflow);
65  return (!overflow) & (!secp256k1_scalar_is_zero(r));
66 }
67 
69 #if defined(EXHAUSTIVE_TEST_ORDER)
70  int i;
71  *r = 0;
72  for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
73  if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
74  *r = i;
75  /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
76  * have a composite group order; fix it in exhaustive_tests.c). */
77  VERIFY_CHECK(*r != 0);
78 }
79 #else
81  int i;
82  /* First compute xN as x ^ (2^N - 1) for some values of N,
83  * and uM as x ^ M for some values of M. */
84  secp256k1_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126;
85  secp256k1_scalar u2, u5, u9, u11, u13;
86 
87  secp256k1_scalar_sqr(&u2, x);
88  secp256k1_scalar_mul(&x2, &u2, x);
89  secp256k1_scalar_mul(&u5, &u2, &x2);
90  secp256k1_scalar_mul(&x3, &u5, &u2);
91  secp256k1_scalar_mul(&u9, &x3, &u2);
92  secp256k1_scalar_mul(&u11, &u9, &u2);
93  secp256k1_scalar_mul(&u13, &u11, &u2);
94 
95  secp256k1_scalar_sqr(&x6, &u13);
96  secp256k1_scalar_sqr(&x6, &x6);
97  secp256k1_scalar_mul(&x6, &x6, &u11);
98 
99  secp256k1_scalar_sqr(&x8, &x6);
100  secp256k1_scalar_sqr(&x8, &x8);
101  secp256k1_scalar_mul(&x8, &x8, &x2);
102 
103  secp256k1_scalar_sqr(&x14, &x8);
104  for (i = 0; i < 5; i++) {
105  secp256k1_scalar_sqr(&x14, &x14);
106  }
107  secp256k1_scalar_mul(&x14, &x14, &x6);
108 
109  secp256k1_scalar_sqr(&x28, &x14);
110  for (i = 0; i < 13; i++) {
111  secp256k1_scalar_sqr(&x28, &x28);
112  }
113  secp256k1_scalar_mul(&x28, &x28, &x14);
114 
115  secp256k1_scalar_sqr(&x56, &x28);
116  for (i = 0; i < 27; i++) {
117  secp256k1_scalar_sqr(&x56, &x56);
118  }
119  secp256k1_scalar_mul(&x56, &x56, &x28);
120 
121  secp256k1_scalar_sqr(&x112, &x56);
122  for (i = 0; i < 55; i++) {
123  secp256k1_scalar_sqr(&x112, &x112);
124  }
125  secp256k1_scalar_mul(&x112, &x112, &x56);
126 
127  secp256k1_scalar_sqr(&x126, &x112);
128  for (i = 0; i < 13; i++) {
129  secp256k1_scalar_sqr(&x126, &x126);
130  }
131  secp256k1_scalar_mul(&x126, &x126, &x14);
132 
133  /* Then accumulate the final result (t starts at x126). */
134  t = &x126;
135  for (i = 0; i < 3; i++) {
136  secp256k1_scalar_sqr(t, t);
137  }
138  secp256k1_scalar_mul(t, t, &u5); /* 101 */
139  for (i = 0; i < 4; i++) { /* 0 */
140  secp256k1_scalar_sqr(t, t);
141  }
142  secp256k1_scalar_mul(t, t, &x3); /* 111 */
143  for (i = 0; i < 4; i++) { /* 0 */
144  secp256k1_scalar_sqr(t, t);
145  }
146  secp256k1_scalar_mul(t, t, &u5); /* 101 */
147  for (i = 0; i < 5; i++) { /* 0 */
148  secp256k1_scalar_sqr(t, t);
149  }
150  secp256k1_scalar_mul(t, t, &u11); /* 1011 */
151  for (i = 0; i < 4; i++) {
152  secp256k1_scalar_sqr(t, t);
153  }
154  secp256k1_scalar_mul(t, t, &u11); /* 1011 */
155  for (i = 0; i < 4; i++) { /* 0 */
156  secp256k1_scalar_sqr(t, t);
157  }
158  secp256k1_scalar_mul(t, t, &x3); /* 111 */
159  for (i = 0; i < 5; i++) { /* 00 */
160  secp256k1_scalar_sqr(t, t);
161  }
162  secp256k1_scalar_mul(t, t, &x3); /* 111 */
163  for (i = 0; i < 6; i++) { /* 00 */
164  secp256k1_scalar_sqr(t, t);
165  }
166  secp256k1_scalar_mul(t, t, &u13); /* 1101 */
167  for (i = 0; i < 4; i++) { /* 0 */
168  secp256k1_scalar_sqr(t, t);
169  }
170  secp256k1_scalar_mul(t, t, &u5); /* 101 */
171  for (i = 0; i < 3; i++) {
172  secp256k1_scalar_sqr(t, t);
173  }
174  secp256k1_scalar_mul(t, t, &x3); /* 111 */
175  for (i = 0; i < 5; i++) { /* 0 */
176  secp256k1_scalar_sqr(t, t);
177  }
178  secp256k1_scalar_mul(t, t, &u9); /* 1001 */
179  for (i = 0; i < 6; i++) { /* 000 */
180  secp256k1_scalar_sqr(t, t);
181  }
182  secp256k1_scalar_mul(t, t, &u5); /* 101 */
183  for (i = 0; i < 10; i++) { /* 0000000 */
184  secp256k1_scalar_sqr(t, t);
185  }
186  secp256k1_scalar_mul(t, t, &x3); /* 111 */
187  for (i = 0; i < 4; i++) { /* 0 */
188  secp256k1_scalar_sqr(t, t);
189  }
190  secp256k1_scalar_mul(t, t, &x3); /* 111 */
191  for (i = 0; i < 9; i++) { /* 0 */
192  secp256k1_scalar_sqr(t, t);
193  }
194  secp256k1_scalar_mul(t, t, &x8); /* 11111111 */
195  for (i = 0; i < 5; i++) { /* 0 */
196  secp256k1_scalar_sqr(t, t);
197  }
198  secp256k1_scalar_mul(t, t, &u9); /* 1001 */
199  for (i = 0; i < 6; i++) { /* 00 */
200  secp256k1_scalar_sqr(t, t);
201  }
202  secp256k1_scalar_mul(t, t, &u11); /* 1011 */
203  for (i = 0; i < 4; i++) {
204  secp256k1_scalar_sqr(t, t);
205  }
206  secp256k1_scalar_mul(t, t, &u13); /* 1101 */
207  for (i = 0; i < 5; i++) {
208  secp256k1_scalar_sqr(t, t);
209  }
210  secp256k1_scalar_mul(t, t, &x2); /* 11 */
211  for (i = 0; i < 6; i++) { /* 00 */
212  secp256k1_scalar_sqr(t, t);
213  }
214  secp256k1_scalar_mul(t, t, &u13); /* 1101 */
215  for (i = 0; i < 10; i++) { /* 000000 */
216  secp256k1_scalar_sqr(t, t);
217  }
218  secp256k1_scalar_mul(t, t, &u13); /* 1101 */
219  for (i = 0; i < 4; i++) {
220  secp256k1_scalar_sqr(t, t);
221  }
222  secp256k1_scalar_mul(t, t, &u9); /* 1001 */
223  for (i = 0; i < 6; i++) { /* 00000 */
224  secp256k1_scalar_sqr(t, t);
225  }
226  secp256k1_scalar_mul(t, t, x); /* 1 */
227  for (i = 0; i < 8; i++) { /* 00 */
228  secp256k1_scalar_sqr(t, t);
229  }
230  secp256k1_scalar_mul(r, t, &x6); /* 111111 */
231 }
232 
234  return !(a->d[0] & 1);
235 }
236 #endif
237 
239 #if defined(USE_SCALAR_INV_BUILTIN)
241 #elif defined(USE_SCALAR_INV_NUM)
242  unsigned char b[32];
243  secp256k1_num n, m;
244  secp256k1_scalar t = *x;
246  secp256k1_num_set_bin(&n, b, 32);
248  secp256k1_num_mod_inverse(&n, &n, &m);
249  secp256k1_num_get_bin(b, 32, &n);
250  secp256k1_scalar_set_b32(r, b, NULL);
251  /* Verify that the inverse was computed correctly, without GMP code. */
252  secp256k1_scalar_mul(&t, &t, r);
254 #else
255 #error "Please select scalar inverse implementation"
256 #endif
257 }
258 
259 /* These parameters are generated using sage/gen_exhaustive_groups.sage. */
260 #if defined(EXHAUSTIVE_TEST_ORDER)
261 # if EXHAUSTIVE_TEST_ORDER == 13
262 # define EXHAUSTIVE_TEST_LAMBDA 9
263 # elif EXHAUSTIVE_TEST_ORDER == 199
264 # define EXHAUSTIVE_TEST_LAMBDA 92
265 # else
266 # error No known lambda for the specified exhaustive test group order.
267 # endif
268 
276  *r2 = (*k + 5) % EXHAUSTIVE_TEST_ORDER;
277  *r1 = (*k + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
278 }
279 #else
280 
284  0x5363AD4CUL, 0xC05C30E0UL, 0xA5261C02UL, 0x8812645AUL,
285  0x122E22EAUL, 0x20816678UL, 0xDF02967CUL, 0x1B23BD72UL
286 );
287 
288 #ifdef VERIFY
289 static void secp256k1_scalar_split_lambda_verify(const secp256k1_scalar *r1, const secp256k1_scalar *r2, const secp256k1_scalar *k);
290 #endif
291 
292 /*
293  * Both lambda and beta are primitive cube roots of unity. That is lambda^3 == 1 mod n and
294  * beta^3 == 1 mod p, where n is the curve order and p is the field order.
295  *
296  * Furthermore, because (X^3 - 1) = (X - 1)(X^2 + X + 1), the primitive cube roots of unity are
297  * roots of X^2 + X + 1. Therefore lambda^2 + lambda == -1 mod n and beta^2 + beta == -1 mod p.
298  * (The other primitive cube roots of unity are lambda^2 and beta^2 respectively.)
299  *
300  * Let l = -1/2 + i*sqrt(3)/2, the complex root of X^2 + X + 1. We can define a ring
301  * homomorphism phi : Z[l] -> Z_n where phi(a + b*l) == a + b*lambda mod n. The kernel of phi
302  * is a lattice over Z[l] (considering Z[l] as a Z-module). This lattice is generated by a
303  * reduced basis {a1 + b1*l, a2 + b2*l} where
304  *
305  * - a1 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
306  * - b1 = -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3}
307  * - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8}
308  * - b2 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
309  *
310  * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm
311  * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1
312  * and k2 are small in absolute value.
313  *
314  * The algorithm computes c1 = round(b2 * k / n) and c2 = round((-b1) * k / n), and gives
315  * k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and
316  * compute r2 = k2 mod n, and r1 = k1 mod n = (k - r2 * lambda) mod n, avoiding the need for
317  * the constants a1 and a2.
318  *
319  * g1, g2 are precomputed constants used to replace division with a rounded multiplication
320  * when decomposing the scalar for an endomorphism-based point multiplication.
321  *
322  * The possibility of using precomputed estimates is mentioned in "Guide to Elliptic Curve
323  * Cryptography" (Hankerson, Menezes, Vanstone) in section 3.5.
324  *
325  * The derivation is described in the paper "Efficient Software Implementation of Public-Key
326  * Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez),
327  * Section 4.3 (here we use a somewhat higher-precision estimate):
328  * d = a1*b2 - b1*a2
329  * g1 = round(2^384 * b2/d)
330  * g2 = round(2^384 * (-b1)/d)
331  *
332  * (Note that d is also equal to the curve order, n, here because [a1,b1] and [a2,b2]
333  * can be found as outputs of the Extended Euclidean Algorithm on inputs n and lambda).
334  *
335  * The function below splits k into r1 and r2, such that
336  * - r1 + lambda * r2 == k (mod n)
337  * - either r1 < 2^128 or -r1 mod n < 2^128
338  * - either r2 < 2^128 or -r2 mod n < 2^128
339  *
340  * See proof below.
341  */
343  secp256k1_scalar c1, c2;
344  static const secp256k1_scalar minus_b1 = SECP256K1_SCALAR_CONST(
345  0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL,
346  0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL
347  );
348  static const secp256k1_scalar minus_b2 = SECP256K1_SCALAR_CONST(
349  0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL,
350  0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL
351  );
352  static const secp256k1_scalar g1 = SECP256K1_SCALAR_CONST(
353  0x3086D221UL, 0xA7D46BCDUL, 0xE86C90E4UL, 0x9284EB15UL,
354  0x3DAA8A14UL, 0x71E8CA7FUL, 0xE893209AUL, 0x45DBB031UL
355  );
356  static const secp256k1_scalar g2 = SECP256K1_SCALAR_CONST(
357  0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL,
358  0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL
359  );
360  VERIFY_CHECK(r1 != k);
361  VERIFY_CHECK(r2 != k);
362  /* these _var calls are constant time since the shift amount is constant */
363  secp256k1_scalar_mul_shift_var(&c1, k, &g1, 384);
364  secp256k1_scalar_mul_shift_var(&c2, k, &g2, 384);
365  secp256k1_scalar_mul(&c1, &c1, &minus_b1);
366  secp256k1_scalar_mul(&c2, &c2, &minus_b2);
367  secp256k1_scalar_add(r2, &c1, &c2);
368  secp256k1_scalar_mul(r1, r2, &secp256k1_const_lambda);
369  secp256k1_scalar_negate(r1, r1);
370  secp256k1_scalar_add(r1, r1, k);
371 
372 #ifdef VERIFY
373  secp256k1_scalar_split_lambda_verify(r1, r2, k);
374 #endif
375 }
376 
377 #ifdef VERIFY
378 /*
379  * Proof for secp256k1_scalar_split_lambda's bounds.
380  *
381  * Let
382  * - epsilon1 = 2^256 * |g1/2^384 - b2/d|
383  * - epsilon2 = 2^256 * |g2/2^384 - (-b1)/d|
384  * - c1 = round(k*g1/2^384)
385  * - c2 = round(k*g2/2^384)
386  *
387  * Lemma 1: |c1 - k*b2/d| < 2^-1 + epsilon1
388  *
389  * |c1 - k*b2/d|
390  * =
391  * |c1 - k*g1/2^384 + k*g1/2^384 - k*b2/d|
392  * <= {triangle inequality}
393  * |c1 - k*g1/2^384| + |k*g1/2^384 - k*b2/d|
394  * =
395  * |c1 - k*g1/2^384| + k*|g1/2^384 - b2/d|
396  * < {rounding in c1 and 0 <= k < 2^256}
397  * 2^-1 + 2^256 * |g1/2^384 - b2/d|
398  * = {definition of epsilon1}
399  * 2^-1 + epsilon1
400  *
401  * Lemma 2: |c2 - k*(-b1)/d| < 2^-1 + epsilon2
402  *
403  * |c2 - k*(-b1)/d|
404  * =
405  * |c2 - k*g2/2^384 + k*g2/2^384 - k*(-b1)/d|
406  * <= {triangle inequality}
407  * |c2 - k*g2/2^384| + |k*g2/2^384 - k*(-b1)/d|
408  * =
409  * |c2 - k*g2/2^384| + k*|g2/2^384 - (-b1)/d|
410  * < {rounding in c2 and 0 <= k < 2^256}
411  * 2^-1 + 2^256 * |g2/2^384 - (-b1)/d|
412  * = {definition of epsilon2}
413  * 2^-1 + epsilon2
414  *
415  * Let
416  * - k1 = k - c1*a1 - c2*a2
417  * - k2 = - c1*b1 - c2*b2
418  *
419  * Lemma 3: |k1| < (a1 + a2 + 1)/2 < 2^128
420  *
421  * |k1|
422  * = {definition of k1}
423  * |k - c1*a1 - c2*a2|
424  * = {(a1*b2 - b1*a2)/n = 1}
425  * |k*(a1*b2 - b1*a2)/n - c1*a1 - c2*a2|
426  * =
427  * |a1*(k*b2/n - c1) + a2*(k*(-b1)/n - c2)|
428  * <= {triangle inequality}
429  * a1*|k*b2/n - c1| + a2*|k*(-b1)/n - c2|
430  * < {Lemma 1 and Lemma 2}
431  * a1*(2^-1 + epsilon1) + a2*(2^-1 + epsilon2)
432  * < {rounding up to an integer}
433  * (a1 + a2 + 1)/2
434  * < {rounding up to a power of 2}
435  * 2^128
436  *
437  * Lemma 4: |k2| < (-b1 + b2)/2 + 1 < 2^128
438  *
439  * |k2|
440  * = {definition of k2}
441  * |- c1*b1 - c2*b2|
442  * = {(b1*b2 - b1*b2)/n = 0}
443  * |k*(b1*b2 - b1*b2)/n - c1*b1 - c2*b2|
444  * =
445  * |b1*(k*b2/n - c1) + b2*(k*(-b1)/n - c2)|
446  * <= {triangle inequality}
447  * (-b1)*|k*b2/n - c1| + b2*|k*(-b1)/n - c2|
448  * < {Lemma 1 and Lemma 2}
449  * (-b1)*(2^-1 + epsilon1) + b2*(2^-1 + epsilon2)
450  * < {rounding up to an integer}
451  * (-b1 + b2)/2 + 1
452  * < {rounding up to a power of 2}
453  * 2^128
454  *
455  * Let
456  * - r2 = k2 mod n
457  * - r1 = k - r2*lambda mod n.
458  *
459  * Notice that r1 is defined such that r1 + r2 * lambda == k (mod n).
460  *
461  * Lemma 5: r1 == k1 mod n.
462  *
463  * r1
464  * == {definition of r1 and r2}
465  * k - k2*lambda
466  * == {definition of k2}
467  * k - (- c1*b1 - c2*b2)*lambda
468  * ==
469  * k + c1*b1*lambda + c2*b2*lambda
470  * == {a1 + b1*lambda == 0 mod n and a2 + b2*lambda == 0 mod n}
471  * k - c1*a1 - c2*a2
472  * == {definition of k1}
473  * k1
474  *
475  * From Lemma 3, Lemma 4, Lemma 5 and the definition of r2, we can conclude that
476  *
477  * - either r1 < 2^128 or -r1 mod n < 2^128
478  * - either r2 < 2^128 or -r2 mod n < 2^128.
479  *
480  * Q.E.D.
481  */
482 static void secp256k1_scalar_split_lambda_verify(const secp256k1_scalar *r1, const secp256k1_scalar *r2, const secp256k1_scalar *k) {
484  unsigned char buf1[32];
485  unsigned char buf2[32];
486 
487  /* (a1 + a2 + 1)/2 is 0xa2a8918ca85bafe22016d0b917e4dd77 */
488  static const unsigned char k1_bound[32] = {
489  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
490  0xa2, 0xa8, 0x91, 0x8c, 0xa8, 0x5b, 0xaf, 0xe2, 0x20, 0x16, 0xd0, 0xb9, 0x17, 0xe4, 0xdd, 0x77
491  };
492 
493  /* (-b1 + b2)/2 + 1 is 0x8a65287bd47179fb2be08846cea267ed */
494  static const unsigned char k2_bound[32] = {
495  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
496  0x8a, 0x65, 0x28, 0x7b, 0xd4, 0x71, 0x79, 0xfb, 0x2b, 0xe0, 0x88, 0x46, 0xce, 0xa2, 0x67, 0xed
497  };
498 
499  secp256k1_scalar_mul(&s, &secp256k1_const_lambda, r2);
500  secp256k1_scalar_add(&s, &s, r1);
502 
503  secp256k1_scalar_negate(&s, r1);
504  secp256k1_scalar_get_b32(buf1, r1);
505  secp256k1_scalar_get_b32(buf2, &s);
506  VERIFY_CHECK(secp256k1_memcmp_var(buf1, k1_bound, 32) < 0 || secp256k1_memcmp_var(buf2, k1_bound, 32) < 0);
507 
508  secp256k1_scalar_negate(&s, r2);
509  secp256k1_scalar_get_b32(buf1, r2);
510  secp256k1_scalar_get_b32(buf2, &s);
511  VERIFY_CHECK(secp256k1_memcmp_var(buf1, k2_bound, 32) < 0 || secp256k1_memcmp_var(buf2, k2_bound, 32) < 0);
512 }
513 #endif /* VERIFY */
514 #endif /* !defined(EXHAUSTIVE_TEST_ORDER) */
515 
516 #endif /* SECP256K1_SCALAR_IMPL_H */
static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b)
Compare two scalars.
static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b)
Multiply two scalars (modulo the group order).
#define VERIFY_CHECK(cond)
Definition: util.h:68
static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned char *bin)
Definition: scalar_impl.h:62
static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsigned int alen)
Set a number to the value of a binary big-endian string.
static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x)
Definition: scalar_impl.h:68
static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a)
Compute the complement of a scalar (modulo the group order).
static int secp256k1_scalar_is_zero(const secp256k1_scalar *a)
Check whether a scalar equals zero.
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *bin, int *overflow)
Set a scalar from a big endian byte array.
static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *m)
Compute a modular inverse.
static const secp256k1_scalar secp256k1_scalar_zero
Definition: scalar_impl.h:32
static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift)
Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer...
#define SECP256K1_INLINE
Definition: secp256k1.h:124
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x)
Definition: scalar_impl.h:238
#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0)
Definition: scalar_4x64.h:17
static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a)
Compute the square of a scalar (modulo the group order).
static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k)
Definition: scalar_impl.h:342
#define CHECK(cond)
Definition: util.h:53
A scalar modulo the group order of the secp256k1 curve.
Definition: scalar_4x64.h:13
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar *a)
Convert a scalar to a byte array.
uint64_t d[4]
Definition: scalar_4x64.h:14
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b)
Add two scalars together (modulo the group order).
#define EXHAUSTIVE_TEST_ORDER
static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a)
Definition: scalar_impl.h:35
static SECP256K1_INLINE int secp256k1_memcmp_var(const void *s1, const void *s2, size_t n)
Semantics like memcmp.
Definition: util.h:224
static void secp256k1_scalar_order_get_num(secp256k1_num *r)
secp256k1 curve order, see secp256k1_ecdsa_const_order_as_fe in ecdsa_impl.h
Definition: scalar_impl.h:42
static SECP256K1_INLINE int secp256k1_scalar_is_even(const secp256k1_scalar *a)
Definition: scalar_impl.h:233
static const secp256k1_scalar secp256k1_const_lambda
The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where lambda is: ...
Definition: scalar_impl.h:283
static const secp256k1_scalar secp256k1_scalar_one
Definition: scalar_impl.h:31
static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const secp256k1_num *a)
Convert a number's absolute value to a binary big-endian string.
static int secp256k1_scalar_is_one(const secp256k1_scalar *a)
Check whether a scalar equals one.