/* Bitcoin Core 0.20.99 ("P2P Digital Currency") — doxygen source listing of scalar_impl.h. */
1 /**********************************************************************
2  * Copyright (c) 2014 Pieter Wuille *
3  * Distributed under the MIT software license, see the accompanying *
4  * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
5  **********************************************************************/
6 
7 #ifndef SECP256K1_SCALAR_IMPL_H
8 #define SECP256K1_SCALAR_IMPL_H
9 
10 #include "group.h"
11 #include "scalar.h"
12 
13 #if defined HAVE_CONFIG_H
14 #include "libsecp256k1-config.h"
15 #endif
16 
17 #if defined(EXHAUSTIVE_TEST_ORDER)
18 #include "scalar_low_impl.h"
19 #elif defined(USE_SCALAR_4X64)
20 #include "scalar_4x64_impl.h"
21 #elif defined(USE_SCALAR_8X32)
22 #include "scalar_8x32_impl.h"
23 #else
24 #error "Please select scalar implementation"
25 #endif
26 
27 #ifndef USE_NUM_NONE
29  unsigned char c[32];
31  secp256k1_num_set_bin(r, c, 32);
32 }
33 
36 #if defined(EXHAUSTIVE_TEST_ORDER)
37  static const unsigned char order[32] = {
38  0,0,0,0,0,0,0,0,
39  0,0,0,0,0,0,0,0,
40  0,0,0,0,0,0,0,0,
41  0,0,0,0,0,0,0,EXHAUSTIVE_TEST_ORDER
42  };
43 #else
44  static const unsigned char order[32] = {
45  0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
46  0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
47  0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
48  0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
49  };
50 #endif
51  secp256k1_num_set_bin(r, order, 32);
52 }
53 #endif
54 
56 #if defined(EXHAUSTIVE_TEST_ORDER)
57  int i;
58  *r = 0;
59  for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
60  if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
61  *r = i;
62  /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
63  * have a composite group order; fix it in exhaustive_tests.c). */
64  VERIFY_CHECK(*r != 0);
65 }
66 #else
68  int i;
69  /* First compute xN as x ^ (2^N - 1) for some values of N,
70  * and uM as x ^ M for some values of M. */
71  secp256k1_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126;
72  secp256k1_scalar u2, u5, u9, u11, u13;
73 
74  secp256k1_scalar_sqr(&u2, x);
75  secp256k1_scalar_mul(&x2, &u2, x);
76  secp256k1_scalar_mul(&u5, &u2, &x2);
77  secp256k1_scalar_mul(&x3, &u5, &u2);
78  secp256k1_scalar_mul(&u9, &x3, &u2);
79  secp256k1_scalar_mul(&u11, &u9, &u2);
80  secp256k1_scalar_mul(&u13, &u11, &u2);
81 
82  secp256k1_scalar_sqr(&x6, &u13);
83  secp256k1_scalar_sqr(&x6, &x6);
84  secp256k1_scalar_mul(&x6, &x6, &u11);
85 
86  secp256k1_scalar_sqr(&x8, &x6);
87  secp256k1_scalar_sqr(&x8, &x8);
88  secp256k1_scalar_mul(&x8, &x8, &x2);
89 
90  secp256k1_scalar_sqr(&x14, &x8);
91  for (i = 0; i < 5; i++) {
92  secp256k1_scalar_sqr(&x14, &x14);
93  }
94  secp256k1_scalar_mul(&x14, &x14, &x6);
95 
96  secp256k1_scalar_sqr(&x28, &x14);
97  for (i = 0; i < 13; i++) {
98  secp256k1_scalar_sqr(&x28, &x28);
99  }
100  secp256k1_scalar_mul(&x28, &x28, &x14);
101 
102  secp256k1_scalar_sqr(&x56, &x28);
103  for (i = 0; i < 27; i++) {
104  secp256k1_scalar_sqr(&x56, &x56);
105  }
106  secp256k1_scalar_mul(&x56, &x56, &x28);
107 
108  secp256k1_scalar_sqr(&x112, &x56);
109  for (i = 0; i < 55; i++) {
110  secp256k1_scalar_sqr(&x112, &x112);
111  }
112  secp256k1_scalar_mul(&x112, &x112, &x56);
113 
114  secp256k1_scalar_sqr(&x126, &x112);
115  for (i = 0; i < 13; i++) {
116  secp256k1_scalar_sqr(&x126, &x126);
117  }
118  secp256k1_scalar_mul(&x126, &x126, &x14);
119 
120  /* Then accumulate the final result (t starts at x126). */
121  t = &x126;
122  for (i = 0; i < 3; i++) {
123  secp256k1_scalar_sqr(t, t);
124  }
125  secp256k1_scalar_mul(t, t, &u5); /* 101 */
126  for (i = 0; i < 4; i++) { /* 0 */
127  secp256k1_scalar_sqr(t, t);
128  }
129  secp256k1_scalar_mul(t, t, &x3); /* 111 */
130  for (i = 0; i < 4; i++) { /* 0 */
131  secp256k1_scalar_sqr(t, t);
132  }
133  secp256k1_scalar_mul(t, t, &u5); /* 101 */
134  for (i = 0; i < 5; i++) { /* 0 */
135  secp256k1_scalar_sqr(t, t);
136  }
137  secp256k1_scalar_mul(t, t, &u11); /* 1011 */
138  for (i = 0; i < 4; i++) {
139  secp256k1_scalar_sqr(t, t);
140  }
141  secp256k1_scalar_mul(t, t, &u11); /* 1011 */
142  for (i = 0; i < 4; i++) { /* 0 */
143  secp256k1_scalar_sqr(t, t);
144  }
145  secp256k1_scalar_mul(t, t, &x3); /* 111 */
146  for (i = 0; i < 5; i++) { /* 00 */
147  secp256k1_scalar_sqr(t, t);
148  }
149  secp256k1_scalar_mul(t, t, &x3); /* 111 */
150  for (i = 0; i < 6; i++) { /* 00 */
151  secp256k1_scalar_sqr(t, t);
152  }
153  secp256k1_scalar_mul(t, t, &u13); /* 1101 */
154  for (i = 0; i < 4; i++) { /* 0 */
155  secp256k1_scalar_sqr(t, t);
156  }
157  secp256k1_scalar_mul(t, t, &u5); /* 101 */
158  for (i = 0; i < 3; i++) {
159  secp256k1_scalar_sqr(t, t);
160  }
161  secp256k1_scalar_mul(t, t, &x3); /* 111 */
162  for (i = 0; i < 5; i++) { /* 0 */
163  secp256k1_scalar_sqr(t, t);
164  }
165  secp256k1_scalar_mul(t, t, &u9); /* 1001 */
166  for (i = 0; i < 6; i++) { /* 000 */
167  secp256k1_scalar_sqr(t, t);
168  }
169  secp256k1_scalar_mul(t, t, &u5); /* 101 */
170  for (i = 0; i < 10; i++) { /* 0000000 */
171  secp256k1_scalar_sqr(t, t);
172  }
173  secp256k1_scalar_mul(t, t, &x3); /* 111 */
174  for (i = 0; i < 4; i++) { /* 0 */
175  secp256k1_scalar_sqr(t, t);
176  }
177  secp256k1_scalar_mul(t, t, &x3); /* 111 */
178  for (i = 0; i < 9; i++) { /* 0 */
179  secp256k1_scalar_sqr(t, t);
180  }
181  secp256k1_scalar_mul(t, t, &x8); /* 11111111 */
182  for (i = 0; i < 5; i++) { /* 0 */
183  secp256k1_scalar_sqr(t, t);
184  }
185  secp256k1_scalar_mul(t, t, &u9); /* 1001 */
186  for (i = 0; i < 6; i++) { /* 00 */
187  secp256k1_scalar_sqr(t, t);
188  }
189  secp256k1_scalar_mul(t, t, &u11); /* 1011 */
190  for (i = 0; i < 4; i++) {
191  secp256k1_scalar_sqr(t, t);
192  }
193  secp256k1_scalar_mul(t, t, &u13); /* 1101 */
194  for (i = 0; i < 5; i++) {
195  secp256k1_scalar_sqr(t, t);
196  }
197  secp256k1_scalar_mul(t, t, &x2); /* 11 */
198  for (i = 0; i < 6; i++) { /* 00 */
199  secp256k1_scalar_sqr(t, t);
200  }
201  secp256k1_scalar_mul(t, t, &u13); /* 1101 */
202  for (i = 0; i < 10; i++) { /* 000000 */
203  secp256k1_scalar_sqr(t, t);
204  }
205  secp256k1_scalar_mul(t, t, &u13); /* 1101 */
206  for (i = 0; i < 4; i++) {
207  secp256k1_scalar_sqr(t, t);
208  }
209  secp256k1_scalar_mul(t, t, &u9); /* 1001 */
210  for (i = 0; i < 6; i++) { /* 00000 */
211  secp256k1_scalar_sqr(t, t);
212  }
213  secp256k1_scalar_mul(t, t, x); /* 1 */
214  for (i = 0; i < 8; i++) { /* 00 */
215  secp256k1_scalar_sqr(t, t);
216  }
217  secp256k1_scalar_mul(r, t, &x6); /* 111111 */
218 }
219 
221  return !(a->d[0] & 1);
222 }
223 #endif
224 
226 #if defined(USE_SCALAR_INV_BUILTIN)
228 #elif defined(USE_SCALAR_INV_NUM)
229  unsigned char b[32];
230  secp256k1_num n, m;
231  secp256k1_scalar t = *x;
233  secp256k1_num_set_bin(&n, b, 32);
235  secp256k1_num_mod_inverse(&n, &n, &m);
236  secp256k1_num_get_bin(b, 32, &n);
237  secp256k1_scalar_set_b32(r, b, NULL);
238  /* Verify that the inverse was computed correctly, without GMP code. */
239  secp256k1_scalar_mul(&t, &t, r);
241 #else
242 #error "Please select scalar inverse implementation"
243 #endif
244 }
245 
246 #ifdef USE_ENDOMORPHISM
247 #if defined(EXHAUSTIVE_TEST_ORDER)
248 
254 static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
255  *r2 = (*a + 5) % EXHAUSTIVE_TEST_ORDER;
257 }
258 #else
259 
297 static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
298  secp256k1_scalar c1, c2;
299  static const secp256k1_scalar minus_lambda = SECP256K1_SCALAR_CONST(
300  0xAC9C52B3UL, 0x3FA3CF1FUL, 0x5AD9E3FDUL, 0x77ED9BA4UL,
301  0xA880B9FCUL, 0x8EC739C2UL, 0xE0CFC810UL, 0xB51283CFUL
302  );
303  static const secp256k1_scalar minus_b1 = SECP256K1_SCALAR_CONST(
304  0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL,
305  0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL
306  );
307  static const secp256k1_scalar minus_b2 = SECP256K1_SCALAR_CONST(
308  0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL,
309  0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL
310  );
311  static const secp256k1_scalar g1 = SECP256K1_SCALAR_CONST(
312  0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00003086UL,
313  0xD221A7D4UL, 0x6BCDE86CUL, 0x90E49284UL, 0xEB153DABUL
314  );
315  static const secp256k1_scalar g2 = SECP256K1_SCALAR_CONST(
316  0x00000000UL, 0x00000000UL, 0x00000000UL, 0x0000E443UL,
317  0x7ED6010EUL, 0x88286F54UL, 0x7FA90ABFUL, 0xE4C42212UL
318  );
319  VERIFY_CHECK(r1 != a);
320  VERIFY_CHECK(r2 != a);
321  /* these _var calls are constant time since the shift amount is constant */
322  secp256k1_scalar_mul_shift_var(&c1, a, &g1, 272);
323  secp256k1_scalar_mul_shift_var(&c2, a, &g2, 272);
324  secp256k1_scalar_mul(&c1, &c1, &minus_b1);
325  secp256k1_scalar_mul(&c2, &c2, &minus_b2);
326  secp256k1_scalar_add(r2, &c1, &c2);
327  secp256k1_scalar_mul(r1, r2, &minus_lambda);
328  secp256k1_scalar_add(r1, r1, a);
329 }
330 #endif
331 #endif
332 
333 #endif /* SECP256K1_SCALAR_IMPL_H */
static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b)
Multiply two scalars (modulo the group order).
#define VERIFY_CHECK(cond)
Definition: util.h:67
static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsigned int alen)
Set a number to the value of a binary big-endian string.
static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x)
Definition: scalar_impl.h:55
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *bin, int *overflow)
Set a scalar from a big endian byte array.
static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *m)
Compute a modular inverse.
static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift)
Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer...
#define EXHAUSTIVE_TEST_LAMBDA
#define SECP256K1_INLINE
Definition: secp256k1.h:123
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x)
Definition: scalar_impl.h:225
#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0)
Definition: scalar_4x64.h:17
static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a)
Compute the square of a scalar (modulo the group order).
#define CHECK(cond)
Definition: util.h:52
A scalar modulo the group order of the secp256k1 curve.
Definition: scalar_4x64.h:13
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar *a)
Convert a scalar to a byte array.
uint64_t d[4]
Definition: scalar_4x64.h:14
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b)
Add two scalars together (modulo the group order).
#define EXHAUSTIVE_TEST_ORDER
static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a)
Definition: scalar_impl.h:28
static void secp256k1_scalar_order_get_num(secp256k1_num *r)
secp256k1 curve order, see secp256k1_ecdsa_const_order_as_fe in ecdsa_impl.h
Definition: scalar_impl.h:35
static SECP256K1_INLINE int secp256k1_scalar_is_even(const secp256k1_scalar *a)
Definition: scalar_impl.h:220
static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const secp256k1_num *a)
Convert a number's absolute value to a binary big-endian string.
static int secp256k1_scalar_is_one(const secp256k1_scalar *a)
Check whether a scalar equals one.