#ifndef SECP256K1_FIELD_REPR_IMPL_H
#define SECP256K1_FIELD_REPR_IMPL_H

#include "checkmem.h"
#include "util.h"
#include "field.h"
#include "modinv32_impl.h"

/* This field implementation represents the value as 10 uint32_t limbs in base 2^26. */
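/* Note on the reduction constant used throughout this file: the field characteristic is
 * p = 2^256 - 2^32 - 977 = 2^256 - 0x1000003D1, so 2^256 == 0x1000003D1 (mod p). In base
 * 2^26, 0x1000003D1 = 0x3D1 + (0x40 << 26), which is why folding an overflow x of 2^256
 * back into the value is written as "t0 += x * 0x3D1UL; t1 += (x << 6);" below. */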
static void secp256k1_fe_impl_verify(const secp256k1_fe *a) {
    const uint32_t *d = a->n;
    int m = a->normalized ? 1 : 2 * a->magnitude;
    int i;
    /* Limbs 0..8 hold at most 26 bits and limb 9 at most 22 bits, each scaled by the magnitude bound m. */
    for (i = 0; i < 9; i++) VERIFY_CHECK(d[i] <= 0x3FFFFFFUL * m);
    VERIFY_CHECK(d[9] <= 0x03FFFFFUL * m);
    if (a->normalized) {
        if (d[9] == 0x03FFFFFUL) {
            uint32_t mid = d[8] & d[7] & d[6] & d[5] & d[4] & d[3] & d[2];
            if (mid == 0x3FFFFFFUL) {
                /* A normalized value must be strictly less than the field characteristic p. */
                VERIFY_CHECK((d[1] + 0x40UL + ((d[0] + 0x3D1UL) >> 26)) <= 0x3FFFFFFUL);
            }
        }
    }
}
static void secp256k1_fe_impl_get_bounds(secp256k1_fe *r, int m) {
    r->n[0] = 0x3FFFFFFUL * 2 * m;
    r->n[1] = 0x3FFFFFFUL * 2 * m;
    r->n[2] = 0x3FFFFFFUL * 2 * m;
    r->n[3] = 0x3FFFFFFUL * 2 * m;
    r->n[4] = 0x3FFFFFFUL * 2 * m;
    r->n[5] = 0x3FFFFFFUL * 2 * m;
    r->n[6] = 0x3FFFFFFUL * 2 * m;
    r->n[7] = 0x3FFFFFFUL * 2 * m;
    r->n[8] = 0x3FFFFFFUL * 2 * m;
    r->n[9] = 0x03FFFFFUL * 2 * m;
}
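/* Normalization notes: a field element of magnitude m has limbs bounded by m times the
 * per-limb maximum (2^26-1 for limbs 0..8, 2^22-1 for limb 9). Full normalization first
 * folds any overflow above 2^256 back in, propagates carries once so the magnitude drops
 * to 1, then detects whether the result is still >= p and, if so, subtracts p by adding
 * 0x1000003D1 and discarding the 2^256 bit. The constant-time variant below performs the
 * final reduction unconditionally. */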
static void secp256k1_fe_impl_normalize(secp256k1_fe *r) {
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];

    /* Reduce t9 at the start so there will be at most a single carry from the first pass. */
    uint32_t m;
    uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;

    /* The first pass ensures the magnitude is 1; m ANDs together limbs 2..8 for the range check below. */
    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; m = t2;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; m &= t3;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; m &= t4;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; m &= t5;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; m &= t6;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; m &= t7;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; m &= t8;

    /* At most a single final reduction is needed; check if the value is >= the field characteristic p. */
    x = (t9 >> 22) | ((t9 == 0x03FFFFFUL) & (m == 0x3FFFFFFUL)
        & ((t1 + 0x40UL + ((t0 + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));

    /* Apply the final reduction (always performed, for constant-time behaviour). */
    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;

    /* Mask off the possible multiple of 2^256 from the final reduction. */
    t9 &= 0x03FFFFFUL;

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
    r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;
}
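/* Weak normalization only reduces the magnitude to 1 (limbs within their nominal ranges);
 * unlike full normalization it does not guarantee a value strictly below p. */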
static void secp256k1_fe_impl_normalize_weak(secp256k1_fe *r) {
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];

    /* Reduce t9 at the start so there will be at most a single carry from the first pass. */
    uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;

    /* A single pass of carry propagation brings the magnitude down to 1. */
    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
    r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;
}
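/* Variable-time normalization: identical to the constant-time version except that the
 * final subtraction of p is skipped entirely when it is not needed. */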
static void secp256k1_fe_impl_normalize_var(secp256k1_fe *r) {
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];

    /* Reduce t9 at the start so there will be at most a single carry from the first pass. */
    uint32_t m;
    uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;

    /* The first pass ensures the magnitude is 1; m ANDs together limbs 2..8 for the range check below. */
    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; m = t2;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; m &= t3;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; m &= t4;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; m &= t5;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; m &= t6;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; m &= t7;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; m &= t8;

    /* At most a single final reduction is needed; check if the value is >= the field characteristic p. */
    x = (t9 >> 22) | ((t9 == 0x03FFFFFUL) & (m == 0x3FFFFFFUL)
        & ((t1 + 0x40UL + ((t0 + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));

    if (x) {
        t0 += 0x3D1UL; t1 += (x << 6);
        t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
        t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
        t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
        t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
        t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
        t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
        t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
        t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
        t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;

        /* Mask off the possible multiple of 2^256 from the final reduction. */
        t9 &= 0x03FFFFFUL;
    }

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
    r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;
}
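/* The normalizes_to_zero checks run one carry pass while maintaining two trackers:
 * z0 ORs the resulting limbs together, so z0 == 0 iff the raw value is 0; z1 ANDs each
 * limb XORed with the 26-bit complement of the corresponding limb of p, so z1 == 0x3FFFFFF
 * iff the raw value is exactly p. Either case means the element reduces to zero mod p. */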
static int secp256k1_fe_impl_normalizes_to_zero(const secp256k1_fe *r) {
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];

    /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of p. */
    uint32_t z0, z1;

    /* Reduce t9 at the start so there will be at most a single carry from the first pass. */
    uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;

    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL; z0  = t0; z1  = t0 ^ 0x3D0UL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; z0 |= t1; z1 &= t1 ^ 0x40UL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; z0 |= t2; z1 &= t2;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; z0 |= t3; z1 &= t3;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; z0 |= t4; z1 &= t4;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; z0 |= t5; z1 &= t5;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; z0 |= t6; z1 &= t6;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; z0 |= t7; z1 &= t7;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; z0 |= t8; z1 &= t8;
                                         z0 |= t9; z1 &= t9 ^ 0x3C00000UL;

    return (z0 == 0) | (z1 == 0x3FFFFFFUL);
}
static int secp256k1_fe_impl_normalizes_to_zero_var(const secp256k1_fe *r) {
    uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9;
    uint32_t z0, z1;
    uint32_t x;

    t0 = r->n[0]; t9 = r->n[9];

    /* Reduce t9 at the start so there will be at most a single carry from the first pass. */
    x = t9 >> 22; t0 += x * 0x3D1UL;

    /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of p. */
    z0 = t0 & 0x3FFFFFFUL; z1 = z0 ^ 0x3D0UL;

    /* Fast return path should catch the majority of cases. */
    if ((z0 != 0UL) & (z1 != 0x3FFFFFFUL)) {
        return 0;
    }

    t1 = r->n[1]; t2 = r->n[2]; t3 = r->n[3]; t4 = r->n[4];
    t5 = r->n[5]; t6 = r->n[6]; t7 = r->n[7]; t8 = r->n[8];

    t9 &= 0x03FFFFFUL; t1 += (x << 6); t1 += (t0 >> 26);
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; z0 |= t1; z1 &= t1 ^ 0x40UL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; z0 |= t2; z1 &= t2;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; z0 |= t3; z1 &= t3;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; z0 |= t4; z1 &= t4;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; z0 |= t5; z1 &= t5;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; z0 |= t6; z1 &= t6;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; z0 |= t7; z1 &= t7;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; z0 |= t8; z1 &= t8;
    z0 |= t9; z1 &= t9 ^ 0x3C00000UL;

    return (z0 == 0) | (z1 == 0x3FFFFFFUL);
}
static SECP256K1_INLINE void secp256k1_fe_impl_set_int(secp256k1_fe *r, int a) {
    r->n[0] = a;
    r->n[1] = r->n[2] = r->n[3] = r->n[4] = r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0;
}
static SECP256K1_INLINE int secp256k1_fe_impl_is_zero(const secp256k1_fe *a) {
    const uint32_t *t = a->n;
    return (t[0] | t[1] | t[2] | t[3] | t[4] | t[5] | t[6] | t[7] | t[8] | t[9]) == 0;
}
static int secp256k1_fe_impl_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
    int i;
    for (i = 9; i >= 0; i--) {
        if (a->n[i] > b->n[i]) {
            return 1;
        }
        if (a->n[i] < b->n[i]) {
            return -1;
        }
    }
    return 0;
}
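/* Byte conversion: the 32 big-endian bytes are split across the ten 26-bit limbs, so most
 * limbs take pieces of two or three adjacent bytes. For example, n[0] holds the lowest
 * 26 bits: all of a[31], a[30], a[29] plus the low 2 bits of a[28]; n[1] then starts with
 * the remaining 6 bits of a[28]. The get_b32 function further below performs the exact inverse. */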
static void secp256k1_fe_impl_set_b32_mod(secp256k1_fe *r, const unsigned char *a) {
    r->n[0] = (uint32_t)a[31] | ((uint32_t)a[30] << 8) | ((uint32_t)a[29] << 16) | ((uint32_t)(a[28] & 0x3) << 24);
    r->n[1] = (uint32_t)((a[28] >> 2) & 0x3f) | ((uint32_t)a[27] << 6) | ((uint32_t)a[26] << 14) | ((uint32_t)(a[25] & 0xf) << 22);
    r->n[2] = (uint32_t)((a[25] >> 4) & 0xf) | ((uint32_t)a[24] << 4) | ((uint32_t)a[23] << 12) | ((uint32_t)(a[22] & 0x3f) << 20);
    r->n[3] = (uint32_t)((a[22] >> 6) & 0x3) | ((uint32_t)a[21] << 2) | ((uint32_t)a[20] << 10) | ((uint32_t)a[19] << 18);
    r->n[4] = (uint32_t)a[18] | ((uint32_t)a[17] << 8) | ((uint32_t)a[16] << 16) | ((uint32_t)(a[15] & 0x3) << 24);
    r->n[5] = (uint32_t)((a[15] >> 2) & 0x3f) | ((uint32_t)a[14] << 6) | ((uint32_t)a[13] << 14) | ((uint32_t)(a[12] & 0xf) << 22);
    r->n[6] = (uint32_t)((a[12] >> 4) & 0xf) | ((uint32_t)a[11] << 4) | ((uint32_t)a[10] << 12) | ((uint32_t)(a[9] & 0x3f) << 20);
    r->n[7] = (uint32_t)((a[9] >> 6) & 0x3) | ((uint32_t)a[8] << 2) | ((uint32_t)a[7] << 10) | ((uint32_t)a[6] << 18);
    r->n[8] = (uint32_t)a[5] | ((uint32_t)a[4] << 8) | ((uint32_t)a[3] << 16) | ((uint32_t)(a[2] & 0x3) << 24);
    r->n[9] = (uint32_t)((a[2] >> 2) & 0x3f) | ((uint32_t)a[1] << 6) | ((uint32_t)a[0] << 14);
}

static int secp256k1_fe_impl_set_b32_limit(secp256k1_fe *r, const unsigned char *a) {
    secp256k1_fe_impl_set_b32_mod(r, a);
    return !((r->n[9] == 0x3FFFFFUL) & ((r->n[8] & r->n[7] & r->n[6] & r->n[5] & r->n[4] & r->n[3] & r->n[2]) == 0x3FFFFFFUL) & ((r->n[1] + 0x40UL + ((r->n[0] + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));
}
static void secp256k1_fe_impl_get_b32(unsigned char *r, const secp256k1_fe *a) {
    r[0] = (a->n[9] >> 14) & 0xff;
    r[1] = (a->n[9] >> 6) & 0xff;
    r[2] = ((a->n[9] & 0x3F) << 2) | ((a->n[8] >> 24) & 0x3);
    r[3] = (a->n[8] >> 16) & 0xff;
    r[4] = (a->n[8] >> 8) & 0xff;
    r[5] = a->n[8] & 0xff;
    r[6] = (a->n[7] >> 18) & 0xff;
    r[7] = (a->n[7] >> 10) & 0xff;
    r[8] = (a->n[7] >> 2) & 0xff;
    r[9] = ((a->n[7] & 0x3) << 6) | ((a->n[6] >> 20) & 0x3f);
    r[10] = (a->n[6] >> 12) & 0xff;
    r[11] = (a->n[6] >> 4) & 0xff;
    r[12] = ((a->n[6] & 0xf) << 4) | ((a->n[5] >> 22) & 0xf);
    r[13] = (a->n[5] >> 14) & 0xff;
    r[14] = (a->n[5] >> 6) & 0xff;
    r[15] = ((a->n[5] & 0x3f) << 2) | ((a->n[4] >> 24) & 0x3);
    r[16] = (a->n[4] >> 16) & 0xff;
    r[17] = (a->n[4] >> 8) & 0xff;
    r[18] = a->n[4] & 0xff;
    r[19] = (a->n[3] >> 18) & 0xff;
    r[20] = (a->n[3] >> 10) & 0xff;
    r[21] = (a->n[3] >> 2) & 0xff;
    r[22] = ((a->n[3] & 0x3) << 6) | ((a->n[2] >> 20) & 0x3f);
    r[23] = (a->n[2] >> 12) & 0xff;
    r[24] = (a->n[2] >> 4) & 0xff;
    r[25] = ((a->n[2] & 0xf) << 4) | ((a->n[1] >> 22) & 0xf);
    r[26] = (a->n[1] >> 14) & 0xff;
    r[27] = (a->n[1] >> 6) & 0xff;
    r[28] = ((a->n[1] & 0x3f) << 2) | ((a->n[0] >> 24) & 0x3);
    r[29] = (a->n[0] >> 16) & 0xff;
    r[30] = (a->n[0] >> 8) & 0xff;
    r[31] = a->n[0] & 0xff;
}
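/* Negation computes 2*(m+1)*p - a limb by limb: 0x3FFFC2F, 0x3FFFFBF, 0x3FFFFFF, ...,
 * 0x03FFFFF are the base-2^26 limbs of p, so for any input of magnitude at most m each
 * subtraction stays nonnegative, and the result is congruent to -a with magnitude m+1. */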
static SECP256K1_INLINE void secp256k1_fe_impl_negate_unchecked(secp256k1_fe *r, const secp256k1_fe *a, int m) {
    r->n[0] = 0x3FFFC2FUL * 2 * (m + 1) - a->n[0];
    r->n[1] = 0x3FFFFBFUL * 2 * (m + 1) - a->n[1];
    r->n[2] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[2];
    r->n[3] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[3];
    r->n[4] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[4];
    r->n[5] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[5];
    r->n[6] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[6];
    r->n[7] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[7];
    r->n[8] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[8];
    r->n[9] = 0x03FFFFFUL * 2 * (m + 1) - a->n[9];
}
#if defined(USE_EXTERNAL_ASM)

/* External assembler implementation */
void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t *SECP256K1_RESTRICT b);
void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a);

#else

#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
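/* In mul_inner and sqr_inner below, c accumulates the product coefficients for the limb
 * currently being produced and d those for the limb ten positions higher. A high
 * coefficient u at position 10+k is folded down as u*R0 into position k and u*R1 into
 * position k+1, which is valid because R0 + (R1 << 26) = 0x1000003D10 == 2^260 (mod p).
 * The carry past the 22-bit top limb is folded with the related identity
 * (R0 >> 4) + ((R1 >> 4) << 26) = 0x1000003D1 == 2^256 (mod p). */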
static SECP256K1_INLINE void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t *SECP256K1_RESTRICT b) {
    uint64_t c, d;
    uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
    uint32_t t9, t1, t0, t2, t3, t4, t5, t6, t7;
    const uint32_t M = 0x3FFFFFFUL, R0 = 0x3D10UL, R1 = 0x400UL;

    d  = (uint64_t)a[0] * b[9]
       + (uint64_t)a[1] * b[8]
       + (uint64_t)a[2] * b[7]
       + (uint64_t)a[3] * b[6]
       + (uint64_t)a[4] * b[5]
       + (uint64_t)a[5] * b[4]
       + (uint64_t)a[6] * b[3]
       + (uint64_t)a[7] * b[2]
       + (uint64_t)a[8] * b[1]
       + (uint64_t)a[9] * b[0];
    t9 = d & M; d >>= 26;

    c  = (uint64_t)a[0] * b[0];
    d += (uint64_t)a[1] * b[9]
       + (uint64_t)a[2] * b[8]
       + (uint64_t)a[3] * b[7]
       + (uint64_t)a[4] * b[6]
       + (uint64_t)a[5] * b[5]
       + (uint64_t)a[6] * b[4]
       + (uint64_t)a[7] * b[3]
       + (uint64_t)a[8] * b[2]
       + (uint64_t)a[9] * b[1];
    u0 = d & M; d >>= 26; c += u0 * R0;
    t0 = c & M; c >>= 26; c += u0 * R1;

    c += (uint64_t)a[0] * b[1]
       + (uint64_t)a[1] * b[0];
    d += (uint64_t)a[2] * b[9]
       + (uint64_t)a[3] * b[8]
       + (uint64_t)a[4] * b[7]
       + (uint64_t)a[5] * b[6]
       + (uint64_t)a[6] * b[5]
       + (uint64_t)a[7] * b[4]
       + (uint64_t)a[8] * b[3]
       + (uint64_t)a[9] * b[2];
    u1 = d & M; d >>= 26; c += u1 * R0;
    t1 = c & M; c >>= 26; c += u1 * R1;

    c += (uint64_t)a[0] * b[2]
       + (uint64_t)a[1] * b[1]
       + (uint64_t)a[2] * b[0];
    d += (uint64_t)a[3] * b[9]
       + (uint64_t)a[4] * b[8]
       + (uint64_t)a[5] * b[7]
       + (uint64_t)a[6] * b[6]
       + (uint64_t)a[7] * b[5]
       + (uint64_t)a[8] * b[4]
       + (uint64_t)a[9] * b[3];
    u2 = d & M; d >>= 26; c += u2 * R0;
    t2 = c & M; c >>= 26; c += u2 * R1;

    c += (uint64_t)a[0] * b[3]
       + (uint64_t)a[1] * b[2]
       + (uint64_t)a[2] * b[1]
       + (uint64_t)a[3] * b[0];
    d += (uint64_t)a[4] * b[9]
       + (uint64_t)a[5] * b[8]
       + (uint64_t)a[6] * b[7]
       + (uint64_t)a[7] * b[6]
       + (uint64_t)a[8] * b[5]
       + (uint64_t)a[9] * b[4];
    u3 = d & M; d >>= 26; c += u3 * R0;
    t3 = c & M; c >>= 26; c += u3 * R1;

    c += (uint64_t)a[0] * b[4]
       + (uint64_t)a[1] * b[3]
       + (uint64_t)a[2] * b[2]
       + (uint64_t)a[3] * b[1]
       + (uint64_t)a[4] * b[0];
    d += (uint64_t)a[5] * b[9]
       + (uint64_t)a[6] * b[8]
       + (uint64_t)a[7] * b[7]
       + (uint64_t)a[8] * b[6]
       + (uint64_t)a[9] * b[5];
    u4 = d & M; d >>= 26; c += u4 * R0;
    t4 = c & M; c >>= 26; c += u4 * R1;

    c += (uint64_t)a[0] * b[5]
       + (uint64_t)a[1] * b[4]
       + (uint64_t)a[2] * b[3]
       + (uint64_t)a[3] * b[2]
       + (uint64_t)a[4] * b[1]
       + (uint64_t)a[5] * b[0];
    d += (uint64_t)a[6] * b[9]
       + (uint64_t)a[7] * b[8]
       + (uint64_t)a[8] * b[7]
       + (uint64_t)a[9] * b[6];
    u5 = d & M; d >>= 26; c += u5 * R0;
    t5 = c & M; c >>= 26; c += u5 * R1;

    c += (uint64_t)a[0] * b[6]
       + (uint64_t)a[1] * b[5]
       + (uint64_t)a[2] * b[4]
       + (uint64_t)a[3] * b[3]
       + (uint64_t)a[4] * b[2]
       + (uint64_t)a[5] * b[1]
       + (uint64_t)a[6] * b[0];
    d += (uint64_t)a[7] * b[9]
       + (uint64_t)a[8] * b[8]
       + (uint64_t)a[9] * b[7];
    u6 = d & M; d >>= 26; c += u6 * R0;
    t6 = c & M; c >>= 26; c += u6 * R1;

    c += (uint64_t)a[0] * b[7]
       + (uint64_t)a[1] * b[6]
       + (uint64_t)a[2] * b[5]
       + (uint64_t)a[3] * b[4]
       + (uint64_t)a[4] * b[3]
       + (uint64_t)a[5] * b[2]
       + (uint64_t)a[6] * b[1]
       + (uint64_t)a[7] * b[0];
    d += (uint64_t)a[8] * b[9]
       + (uint64_t)a[9] * b[8];
    u7 = d & M; d >>= 26; c += u7 * R0;
    t7 = c & M; c >>= 26; c += u7 * R1;

    c += (uint64_t)a[0] * b[8]
       + (uint64_t)a[1] * b[7]
       + (uint64_t)a[2] * b[6]
       + (uint64_t)a[3] * b[5]
       + (uint64_t)a[4] * b[4]
       + (uint64_t)a[5] * b[3]
       + (uint64_t)a[6] * b[2]
       + (uint64_t)a[7] * b[1]
       + (uint64_t)a[8] * b[0];
    d += (uint64_t)a[9] * b[9];
    u8 = d & M; d >>= 26; c += u8 * R0;

    /* Store the completed middle limbs. */
    r[3] = t3;
    r[4] = t4;
    r[5] = t5;
    r[6] = t6;
    r[7] = t7;

    /* Fold the remaining carries into the top and bottom limbs. */
    r[8] = c & M; c >>= 26; c += u8 * R1;
    c   += d * R0 + t9;
    r[9] = c & (M >> 4); c >>= 22; c += d * (R1 << 4);

    d    = c * (R0 >> 4) + t0;
    r[0] = d & M; d >>= 26;
    d   += c * (R1 >> 4) + t1;
    r[1] = d & M; d >>= 26;
    d   += t2;
    r[2] = d;
}
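/* sqr_inner follows the same column-by-column schedule as mul_inner, but exploits symmetry:
 * each off-diagonal product a[i]*a[j] appears twice, so one factor is pre-doubled (a[i]*2)
 * and only the diagonal terms a[i]*a[i] are added once. */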
static SECP256K1_INLINE void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a) {
    uint64_t c, d;
    uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
    uint32_t t9, t0, t1, t2, t3, t4, t5, t6, t7;
    const uint32_t M = 0x3FFFFFFUL, R0 = 0x3D10UL, R1 = 0x400UL;

    d  = (uint64_t)(a[0]*2) * a[9]
       + (uint64_t)(a[1]*2) * a[8]
       + (uint64_t)(a[2]*2) * a[7]
       + (uint64_t)(a[3]*2) * a[6]
       + (uint64_t)(a[4]*2) * a[5];
    t9 = d & M; d >>= 26;

    c  = (uint64_t)a[0] * a[0];
    d += (uint64_t)(a[1]*2) * a[9]
       + (uint64_t)(a[2]*2) * a[8]
       + (uint64_t)(a[3]*2) * a[7]
       + (uint64_t)(a[4]*2) * a[6]
       + (uint64_t)a[5] * a[5];
    u0 = d & M; d >>= 26; c += u0 * R0;
    t0 = c & M; c >>= 26; c += u0 * R1;

    c += (uint64_t)(a[0]*2) * a[1];
    d += (uint64_t)(a[2]*2) * a[9]
       + (uint64_t)(a[3]*2) * a[8]
       + (uint64_t)(a[4]*2) * a[7]
       + (uint64_t)(a[5]*2) * a[6];
    u1 = d & M; d >>= 26; c += u1 * R0;
    t1 = c & M; c >>= 26; c += u1 * R1;

    c += (uint64_t)(a[0]*2) * a[2]
       + (uint64_t)a[1] * a[1];
    d += (uint64_t)(a[3]*2) * a[9]
       + (uint64_t)(a[4]*2) * a[8]
       + (uint64_t)(a[5]*2) * a[7]
       + (uint64_t)a[6] * a[6];
    u2 = d & M; d >>= 26; c += u2 * R0;
    t2 = c & M; c >>= 26; c += u2 * R1;

    c += (uint64_t)(a[0]*2) * a[3]
       + (uint64_t)(a[1]*2) * a[2];
    d += (uint64_t)(a[4]*2) * a[9]
       + (uint64_t)(a[5]*2) * a[8]
       + (uint64_t)(a[6]*2) * a[7];
    u3 = d & M; d >>= 26; c += u3 * R0;
    t3 = c & M; c >>= 26; c += u3 * R1;

    c += (uint64_t)(a[0]*2) * a[4]
       + (uint64_t)(a[1]*2) * a[3]
       + (uint64_t)a[2] * a[2];
    d += (uint64_t)(a[5]*2) * a[9]
       + (uint64_t)(a[6]*2) * a[8]
       + (uint64_t)a[7] * a[7];
    u4 = d & M; d >>= 26; c += u4 * R0;
    t4 = c & M; c >>= 26; c += u4 * R1;

    c += (uint64_t)(a[0]*2) * a[5]
       + (uint64_t)(a[1]*2) * a[4]
       + (uint64_t)(a[2]*2) * a[3];
    d += (uint64_t)(a[6]*2) * a[9]
       + (uint64_t)(a[7]*2) * a[8];
    u5 = d & M; d >>= 26; c += u5 * R0;
    t5 = c & M; c >>= 26; c += u5 * R1;

    c += (uint64_t)(a[0]*2) * a[6]
       + (uint64_t)(a[1]*2) * a[5]
       + (uint64_t)(a[2]*2) * a[4]
       + (uint64_t)a[3] * a[3];
    d += (uint64_t)(a[7]*2) * a[9]
       + (uint64_t)a[8] * a[8];
    u6 = d & M; d >>= 26; c += u6 * R0;
    t6 = c & M; c >>= 26; c += u6 * R1;

    c += (uint64_t)(a[0]*2) * a[7]
       + (uint64_t)(a[1]*2) * a[6]
       + (uint64_t)(a[2]*2) * a[5]
       + (uint64_t)(a[3]*2) * a[4];
    d += (uint64_t)(a[8]*2) * a[9];
    u7 = d & M; d >>= 26; c += u7 * R0;
    t7 = c & M; c >>= 26; c += u7 * R1;

    c += (uint64_t)(a[0]*2) * a[8]
       + (uint64_t)(a[1]*2) * a[7]
       + (uint64_t)(a[2]*2) * a[6]
       + (uint64_t)(a[3]*2) * a[5]
       + (uint64_t)a[4] * a[4];
    d += (uint64_t)a[9] * a[9];
    u8 = d & M; d >>= 26; c += u8 * R0;

    /* Store the completed middle limbs. */
    r[3] = t3;
    r[4] = t4;
    r[5] = t5;
    r[6] = t6;
    r[7] = t7;

    /* Fold the remaining carries into the top and bottom limbs. */
    r[8] = c & M; c >>= 26; c += u8 * R1;
    c   += d * R0 + t9;
    r[9] = c & (M >> 4); c >>= 22; c += d * (R1 << 4);

    d    = c * (R0 >> 4) + t0;
    r[0] = d & M; d >>= 26;
    d   += c * (R1 >> 4) + t1;
    r[1] = d & M; d >>= 26;
    d   += t2;
    r[2] = d;
}

#endif /* USE_EXTERNAL_ASM */
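/* The cmov helpers select between r and a in constant time: mask0 is all-ones when flag is 0
 * and zero when flag is 1, mask1 is its complement, and each limb is blended with AND/OR.
 * The flag is copied through a volatile to discourage the compiler from turning the
 * selection back into a branch. */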
static SECP256K1_INLINE void secp256k1_fe_impl_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
    uint32_t mask0, mask1;
    volatile int vflag = flag;
    SECP256K1_CHECKMEM_CHECK_VERIFY(r->n, sizeof(r->n));
    mask0 = vflag + ~((uint32_t)0);
    mask1 = ~mask0;
    r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
    r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
    r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
    r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
    r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
    r->n[5] = (r->n[5] & mask0) | (a->n[5] & mask1);
    r->n[6] = (r->n[6] & mask0) | (a->n[6] & mask1);
    r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1);
    r->n[8] = (r->n[8] & mask0) | (a->n[8] & mask1);
    r->n[9] = (r->n[9] & mask0) | (a->n[9] & mask1);
}
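/* Halving: p is odd, so exactly one of x and x+p is even for any x in [0, p). If the input
 * is odd, p is added limb-wise (selected by "mask") before the whole value is shifted right
 * by one bit, with each limb receiving the lowest bit of the limb above it. */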
static SECP256K1_INLINE void secp256k1_fe_impl_half(secp256k1_fe *r) {
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
    uint32_t one = (uint32_t)1;
    uint32_t mask = -(t0 & one) >> 6;

    /* Add the limbs of p when the value is odd, so the shift below divides exactly by 2. */
    t0 += 0x3FFFC2FUL & mask;
    t1 += 0x3FFFFBFUL & mask;
    t2 += mask;
    t3 += mask;
    t4 += mask;
    t5 += mask;
    t6 += mask;
    t7 += mask;
    t8 += mask;
    t9 += mask >> 4;

    r->n[0] = (t0 >> 1) + ((t1 & one) << 25);
    r->n[1] = (t1 >> 1) + ((t2 & one) << 25);
    r->n[2] = (t2 >> 1) + ((t3 & one) << 25);
    r->n[3] = (t3 >> 1) + ((t4 & one) << 25);
    r->n[4] = (t4 >> 1) + ((t5 & one) << 25);
    r->n[5] = (t5 >> 1) + ((t6 & one) << 25);
    r->n[6] = (t6 >> 1) + ((t7 & one) << 25);
    r->n[7] = (t7 >> 1) + ((t8 & one) << 25);
    r->n[8] = (t8 >> 1) + ((t9 & one) << 25);
    r->n[9] = (t9 >> 1);
}
static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
    uint32_t mask0, mask1;
    volatile int vflag = flag;
    SECP256K1_CHECKMEM_CHECK_VERIFY(r->n, sizeof(r->n));
    mask0 = vflag + ~((uint32_t)0);
    mask1 = ~mask0;
    r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
    r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
    r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
    r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
    r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
    r->n[5] = (r->n[5] & mask0) | (a->n[5] & mask1);
    r->n[6] = (r->n[6] & mask0) | (a->n[6] & mask1);
    r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1);
}
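/* Storage conversion: secp256k1_fe_storage packs the value into 8 full 32-bit words, so
 * to_storage expects a fully normalized input, while from_storage re-splits the 256 bits
 * into the 10x26 form with the top limb holding 22 bits. */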
static void secp256k1_fe_impl_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) {
    r->n[0] = a->n[0] | a->n[1] << 26;
    r->n[1] = a->n[1] >> 6 | a->n[2] << 20;
    r->n[2] = a->n[2] >> 12 | a->n[3] << 14;
    r->n[3] = a->n[3] >> 18 | a->n[4] << 8;
    r->n[4] = a->n[4] >> 24 | a->n[5] << 2 | a->n[6] << 28;
    r->n[5] = a->n[6] >> 4 | a->n[7] << 22;
    r->n[6] = a->n[7] >> 10 | a->n[8] << 16;
    r->n[7] = a->n[8] >> 16 | a->n[9] << 10;
}
static SECP256K1_INLINE void secp256k1_fe_impl_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a) {
    r->n[0] = a->n[0] & 0x3FFFFFFUL;
    r->n[1] = a->n[0] >> 26 | ((a->n[1] << 6) & 0x3FFFFFFUL);
    r->n[2] = a->n[1] >> 20 | ((a->n[2] << 12) & 0x3FFFFFFUL);
    r->n[3] = a->n[2] >> 14 | ((a->n[3] << 18) & 0x3FFFFFFUL);
    r->n[4] = a->n[3] >> 8 | ((a->n[4] << 24) & 0x3FFFFFFUL);
    r->n[5] = (a->n[4] >> 2) & 0x3FFFFFFUL;
    r->n[6] = a->n[4] >> 28 | ((a->n[5] << 4) & 0x3FFFFFFUL);
    r->n[7] = a->n[5] >> 22 | ((a->n[6] << 10) & 0x3FFFFFFUL);
    r->n[8] = a->n[6] >> 16 | ((a->n[7] << 16) & 0x3FFFFFFUL);
    r->n[9] = a->n[7] >> 10;
}
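/* The signed30 conversions bridge to the modular-inverse code (modinv32), which works on
 * nine 30-bit limbs. from_signed30 assumes the modinv output is already reduced to [0, p),
 * so re-splitting into 26-bit limbs needs no further reduction; to_signed30 likewise
 * operates on a fully normalized 10x26 element. */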
static void secp256k1_fe_from_signed30(secp256k1_fe *r, const secp256k1_modinv32_signed30 *a) {
    const uint32_t M26 = UINT32_MAX >> 6;
    const uint32_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4],
                   a5 = a->v[5], a6 = a->v[6], a7 = a->v[7], a8 = a->v[8];

    r->n[0] =  a0                   & M26;
    r->n[1] = (a0 >> 26 | a1 <<  4) & M26;
    r->n[2] = (a1 >> 22 | a2 <<  8) & M26;
    r->n[3] = (a2 >> 18 | a3 << 12) & M26;
    r->n[4] = (a3 >> 14 | a4 << 16) & M26;
    r->n[5] = (a4 >> 10 | a5 << 20) & M26;
    r->n[6] = (a5 >>  6 | a6 << 24) & M26;
    r->n[7] = (a6 >>  2           ) & M26;
    r->n[8] = (a6 >> 28 | a7 <<  2) & M26;
    r->n[9] = (a7 >> 24 | a8 <<  6);
}
static void secp256k1_fe_to_signed30(secp256k1_modinv32_signed30 *r, const secp256k1_fe *a) {
    const uint32_t M30 = UINT32_MAX >> 2;
    const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4],
                   a5 = a->n[5], a6 = a->n[6], a7 = a->n[7], a8 = a->n[8], a9 = a->n[9];

    r->v[0] = (a0       | a1 << 26) & M30;
    r->v[1] = (a1 >>  4 | a2 << 22) & M30;
    r->v[2] = (a2 >>  8 | a3 << 18) & M30;
    r->v[3] = (a3 >> 12 | a4 << 14) & M30;
    r->v[4] = (a4 >> 16 | a5 << 10) & M30;
    r->v[5] = (a5 >> 20 | a6 <<  6) & M30;
    r->v[6] = (a6 >> 24 | a7 <<  2 | a8 << 28) & M30;
    r->v[7] = (a8 >>  2 | a9 << 24) & M30;
    r->v[8] =  a9 >>  6;
}
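/* The modinv32 modinfo below encodes p in signed30 limbs:
 * -0x3D1 + (-4)*2^30 + 65536*2^240 = -977 - 2^32 + 2^256 = p. */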
static const secp256k1_modinv32_modinfo secp256k1_const_modinfo_fe = {
    {{-0x3D1, -4, 0, 0, 0, 0, 0, 0, 65536}},
    0x2DDACACFL /* modulus_inv30: p^-1 mod 2^30 */
};
/* Summary of the macros and functions that make up this field implementation and the
 * helpers it relies on:
 *
 * Macros:
 *   SECP256K1_CHECKMEM_CHECK_VERIFY(p, len), VERIFY_CHECK(cond), VERIFY_BITS(x, n),
 *   SECP256K1_RESTRICT, secp256k1_fe_normalize, secp256k1_fe_normalize_var,
 *   secp256k1_fe_is_zero
 *
 * Functions:
 *   static void secp256k1_fe_impl_normalize(secp256k1_fe *r)
 *   static void secp256k1_fe_impl_normalize_weak(secp256k1_fe *r)
 *   static void secp256k1_fe_impl_normalize_var(secp256k1_fe *r)
 *   static int secp256k1_fe_impl_normalizes_to_zero(const secp256k1_fe *r)
 *   static int secp256k1_fe_impl_normalizes_to_zero_var(const secp256k1_fe *r)
 *   static void secp256k1_fe_impl_get_bounds(secp256k1_fe *r, int m)
 *   static SECP256K1_INLINE void secp256k1_fe_impl_set_int(secp256k1_fe *r, int a)
 *   static SECP256K1_INLINE int secp256k1_fe_impl_is_zero(const secp256k1_fe *a)
 *   static SECP256K1_INLINE int secp256k1_fe_impl_is_odd(const secp256k1_fe *a)
 *   static int secp256k1_fe_impl_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b)
 *   static void secp256k1_fe_impl_set_b32_mod(secp256k1_fe *r, const unsigned char *a)
 *   static int secp256k1_fe_impl_set_b32_limit(secp256k1_fe *r, const unsigned char *a)
 *   static void secp256k1_fe_impl_get_b32(unsigned char *r, const secp256k1_fe *a)
 *       Convert a field element to a 32-byte big endian value.
 *   static SECP256K1_INLINE void secp256k1_fe_impl_negate_unchecked(secp256k1_fe *r, const secp256k1_fe *a, int m)
 *   static SECP256K1_INLINE void secp256k1_fe_impl_mul_int_unchecked(secp256k1_fe *r, int a)
 *   static SECP256K1_INLINE void secp256k1_fe_impl_add_int(secp256k1_fe *r, int a)
 *   static SECP256K1_INLINE void secp256k1_fe_impl_add(secp256k1_fe *r, const secp256k1_fe *a)
 *   static SECP256K1_INLINE void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t *SECP256K1_RESTRICT b)
 *   static SECP256K1_INLINE void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a)
 *   static SECP256K1_INLINE void secp256k1_fe_impl_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe *SECP256K1_RESTRICT b)
 *   static SECP256K1_INLINE void secp256k1_fe_impl_sqr(secp256k1_fe *r, const secp256k1_fe *a)
 *   static SECP256K1_INLINE void secp256k1_fe_impl_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag)
 *   static SECP256K1_INLINE void secp256k1_fe_impl_half(secp256k1_fe *r)
 *   static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag)
 *   static void secp256k1_fe_impl_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a)
 *   static SECP256K1_INLINE void secp256k1_fe_impl_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a)
 *   static void secp256k1_fe_to_signed30(secp256k1_modinv32_signed30 *r, const secp256k1_fe *a)
 *   static void secp256k1_fe_from_signed30(secp256k1_fe *r, const secp256k1_modinv32_signed30 *a)
 *   static const secp256k1_modinv32_modinfo secp256k1_const_modinfo_fe
 *   static void secp256k1_fe_impl_inv(secp256k1_fe *r, const secp256k1_fe *x)
 *   static void secp256k1_fe_impl_inv_var(secp256k1_fe *r, const secp256k1_fe *x)
 *   static int secp256k1_fe_impl_is_square_var(const secp256k1_fe *x)
 *   static int secp256k1_fe_sqrt(secp256k1_fe *SECP256K1_RESTRICT r, const secp256k1_fe *SECP256K1_RESTRICT a)
 *       Compute a square root of a field element.
 *   static void secp256k1_modinv32(secp256k1_modinv32_signed30 *x, const secp256k1_modinv32_modinfo *modinfo)
 *   static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256k1_modinv32_modinfo *modinfo)
 *   static int secp256k1_jacobi32_maybe_var(const secp256k1_modinv32_signed30 *x, const secp256k1_modinv32_modinfo *modinfo)
 */

#endif /* SECP256K1_FIELD_REPR_IMPL_H */