7#ifndef SECP256K1_ECMULT_CONST_IMPL_H
8#define SECP256K1_ECMULT_CONST_IMPL_H
15#if defined(EXHAUSTIVE_TEST_ORDER)
19# if EXHAUSTIVE_TEST_ORDER == 199
20# define ECMULT_CONST_GROUP_SIZE 4
21# elif EXHAUSTIVE_TEST_ORDER == 13
22# define ECMULT_CONST_GROUP_SIZE 3
23# elif EXHAUSTIVE_TEST_ORDER == 7
24# define ECMULT_CONST_GROUP_SIZE 2
26# error "Unknown EXHAUSTIVE_TEST_ORDER"
30# define ECMULT_CONST_GROUP_SIZE 5
33#define ECMULT_CONST_TABLE_SIZE (1L << (ECMULT_CONST_GROUP_SIZE - 1))
34#define ECMULT_CONST_GROUPS ((129 + ECMULT_CONST_GROUP_SIZE - 1) / ECMULT_CONST_GROUP_SIZE)
35#define ECMULT_CONST_BITS (ECMULT_CONST_GROUPS * ECMULT_CONST_GROUP_SIZE)
61#define ECMULT_CONST_TABLE_GET_GE(r,pre,n) do { \
64 volatile unsigned int negative = ((n) >> (ECMULT_CONST_GROUP_SIZE - 1)) ^ 1; \
86 unsigned int index = ((unsigned int)(-negative) ^ n) & ((1U << (ECMULT_CONST_GROUP_SIZE - 1)) - 1U); \
88 VERIFY_CHECK((n) < (1U << ECMULT_CONST_GROUP_SIZE)); \
89 VERIFY_CHECK(index < (1U << (ECMULT_CONST_GROUP_SIZE - 1))); \
92 (r)->x = (pre)[m].x; \
93 (r)->y = (pre)[m].y; \
94 for (m = 1; m < ECMULT_CONST_TABLE_SIZE; m++) { \
97 secp256k1_fe_cmov(&(r)->x, &(pre)[m].x, m == index); \
98 secp256k1_fe_cmov(&(r)->y, &(pre)[m].y, m == index); \
101 secp256k1_fe_negate(&neg_y, &(r)->y, 1); \
102 secp256k1_fe_cmov(&(r)->y, &neg_y, negative); \
108#ifdef EXHAUSTIVE_TEST_ORDER
111#elif ECMULT_CONST_BITS == 129
114#elif ECMULT_CONST_BITS == 130
117#elif ECMULT_CONST_BITS == 132
121# error "Unknown ECMULT_CONST_BITS"
216 for (i = 129; i < 256; ++i) {
356 if (!known_on_curve) {
370 if (!known_on_curve) {
static int secp256k1_ecmult_const_xonly(secp256k1_fe *r, const secp256k1_fe *n, const secp256k1_fe *d, const secp256k1_scalar *q, int known_on_curve)
static void secp256k1_ecmult_const_odd_multiples_table_globalz(secp256k1_ge *pre, secp256k1_fe *globalz, const secp256k1_gej *a)
Fill a table 'pre' with precomputed odd multiples of a.
static const secp256k1_scalar secp256k1_ecmult_const_K
#define ECMULT_CONST_GROUP_SIZE
static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *q)
#define ECMULT_CONST_TABLE_GET_GE(r, pre, n)
#define ECMULT_CONST_TABLE_SIZE
#define ECMULT_CONST_GROUPS
#define ECMULT_CONST_BITS
static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_ge *pre_a, secp256k1_fe *zr, secp256k1_fe *z, const secp256k1_gej *a)
Fill a table 'pre_a' with precomputed odd multiples of a.
#define secp256k1_fe_mul_int(r, a)
Multiply a field element with a small integer.
#define secp256k1_fe_is_square_var
#define secp256k1_fe_normalizes_to_zero
#define secp256k1_fe_add_int
static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a)
Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast.
static void secp256k1_gej_set_infinity(secp256k1_gej *r)
Set a group element (jacobian) equal to the point at infinity.
static int secp256k1_gej_is_infinity(const secp256k1_gej *a)
Check whether a group element is the point at infinity.
static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b)
Set r equal to the sum of a and b (with b given in affine coordinates, and not infinity).
static void secp256k1_ge_table_set_globalz(size_t len, secp256k1_ge *a, const secp256k1_fe *zr)
Bring a batch of inputs to the same global z "denominator", based on ratios between (omitted) z coordinates of the inputs.
static int secp256k1_ge_is_infinity(const secp256k1_ge *a)
Check whether a group element is the point at infinity.
static void secp256k1_gej_double(secp256k1_gej *r, const secp256k1_gej *a)
Set r equal to the double of a.
static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a)
Set a group element (jacobian) equal to another which is given in affine coordinates.
static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a)
Multiply a scalar with the multiplicative inverse of 2.
static int secp256k1_scalar_is_zero(const secp256k1_scalar *a)
Check whether a scalar equals zero.
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b)
Add two scalars together (modulo the group order).
static uint32_t secp256k1_scalar_get_bits_limb32(const secp256k1_scalar *a, unsigned int offset, unsigned int count)
Access bits (1 < count <= 32) from a scalar.
static void secp256k1_scalar_split_lambda(secp256k1_scalar *SECP256K1_RESTRICT r1, secp256k1_scalar *SECP256K1_RESTRICT r2, const secp256k1_scalar *SECP256K1_RESTRICT k)
Find r1 and r2 such that r1+r2*lambda = k, where r1 and r2 or their negations are maximum 128 bits long.
static uint32_t secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count)
Access bits (1 < count <= 32) from a scalar.
#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0)
#define VERIFY_CHECK(cond)
This field implementation represents the value as 10 uint32_t limbs in base 2^26.
A group element in affine coordinates on the secp256k1 curve, or occasionally on an isomorphic curve of the form y^2 = x^3 + 7*d.
A group element of the secp256k1 curve, in jacobian coordinates.
A scalar modulo the group order of the secp256k1 curve.
#define EXHAUSTIVE_TEST_ORDER