#ifndef SECP256K1_ECMULT_IMPL_H
#define SECP256K1_ECMULT_IMPL_H
#if defined(EXHAUSTIVE_TEST_ORDER)
# if EXHAUSTIVE_TEST_ORDER > 128
# elif EXHAUSTIVE_TEST_ORDER > 8
#define WNAF_SIZE_BITS(bits, w) CEIL_DIV(bits, w)
#define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w)
#define PIPPENGER_SCRATCH_OBJECTS 6
#define STRAUSS_SCRATCH_OBJECTS 5
#define PIPPENGER_MAX_BUCKET_WINDOW 12
#define ECMULT_PIPPENGER_THRESHOLD 88
#define ECMULT_MAX_POINTS_PER_BATCH 5000000
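As a quick check of the window-size arithmetic above: a 256-bit scalar cut into w-bit windows needs CEIL_DIV(256, w) digits. A minimal standalone sketch, assuming CEIL_DIV is the usual round-up division from util.h and that WNAF_BITS is 256 outside exhaustive-test builds:

#include <stdio.h>

/* Assumed definitions, mirroring the macros quoted above. */
#define CEIL_DIV(x, y) (((x) + (y) - 1) / (y))
#define WNAF_BITS 256
#define WNAF_SIZE_BITS(bits, w) CEIL_DIV(bits, w)
#define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w)

int main(void) {
    /* A 256-bit scalar split into w-bit windows needs ceil(256/w) digits. */
    printf("%d\n", WNAF_SIZE(5));  /* prints 52 */
    printf("%d\n", WNAF_SIZE(13)); /* prints 20 */
    return 0;
}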
for (i = 1; i < n; i++) {
int last_set_bit = -1;
for (bit = 0; bit < len; bit++) {
if (now > len - bit) {
carry = (word >> (w-1)) & 1;
wnaf[bit] = sign * word;
int verify_bit = bit;
while (verify_bit < 256) {
return last_set_bit + 1;
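The fragments above are from secp256k1_ecmult_wnaf, which rewrites a scalar as a string of signed digits, one entry per bit position. A standalone sketch of the invariant the output satisfies (the wnaf_reconstruct helper is hypothetical, not part of the library): read the digits most-significant first with a doubling between positions and the original value comes back.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: rebuild a small value from a wNAF digit array to
 * illustrate that sum(wnaf[i] * 2^i) equals the encoded number. */
static int64_t wnaf_reconstruct(const int *wnaf, int len) {
    int64_t acc = 0;
    int i;
    for (i = len - 1; i >= 0; i--) {
        acc = 2 * acc + wnaf[i];  /* double, then add the digit at position i */
    }
    return acc;
}

int main(void) {
    /* 7 encoded as 8 - 1: digits (least-significant first) -1, 0, 0, 1. */
    int wnaf[4] = { -1, 0, 0, 1 };
    printf("%lld\n", (long long)wnaf_reconstruct(wnaf, 4)); /* prints 7 */
    return 0;
}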
int wnaf_ng_128[129];
for (np = 0; np < num; ++np) {
for (np = 0; np < no; ++np) {
if (bits_ng_1 > bits) {
if (bits_ng_128 > bits) {
for (i = bits - 1; i >= 0; i--) {
for (np = 0; np < no; ++np) {
if (i < bits_ng_1 && (n = wnaf_ng_1[i])) {
if (i < bits_ng_128 && (n = wnaf_ng_128[i])) {
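The strauss_wnaf main loop above interleaves the scalars: one shared doubling per bit position, followed by table additions for whichever digits are non-zero (Shamir's trick). A plain-integer analogue of that control flow, only to make the structure concrete (integers stand in for group elements; this is not library code):

#include <stdint.h>
#include <stdio.h>

static uint64_t shamir_mul_add(uint64_t a, uint64_t na, uint64_t g, uint64_t ng) {
    uint64_t r = 0;
    int i;
    for (i = 63; i >= 0; i--) {
        r += r;                       /* one shared "doubling" per bit */
        if ((na >> i) & 1) r += a;    /* add a's contribution if its bit is set */
        if ((ng >> i) & 1) r += g;    /* add g's contribution if its bit is set */
    }
    return r;
}

int main(void) {
    /* 7*13 + 5*9 = 136 */
    printf("%llu\n", (unsigned long long)shamir_mul_add(7, 13, 5, 9));
    return 0;
}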
return n_points*point_size;
if (inp_g_sc == NULL && n_points == 0) {
if (points == NULL || scalars == NULL || state.aux == NULL || state.pre_a == NULL || state.ps == NULL) {
for (i = 0; i < n_points; i++) {
if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) {
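strauss_batch pulls each (scalar, point) pair through the secp256k1_ecmult_multi_callback typedef listed further down. A sketch of what such a callback typically looks like on the caller side; the struct and function names here are made up for illustration, and the internal scalar/group types are assumed to be available:

#include <stddef.h>
/* assumes the library-internal scalar.h and group.h are included */

struct demo_batch {                  /* hypothetical container for the inputs */
    const secp256k1_scalar *scalars;
    const secp256k1_ge *points;
};

static int demo_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
    const struct demo_batch *batch = (const struct demo_batch *)data;
    *sc = batch->scalars[idx];       /* hand back the idx-th scalar ... */
    *pt = batch->points[idx];        /* ... and the idx-th point */
    return 1;                        /* non-zero keeps going; 0 aborts the multiplication */
}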
for (pos = 0; pos < WNAF_SIZE(w); pos++) {
for (pos = WNAF_SIZE(w) - 1; pos > 0; pos--) {
while (pos <= max_pos) {
if ((val & 1) == 0) {
wnaf[pos - 1] -= (1 << w);
wnaf[pos] = (val + 1);
if (pos >= 2 && ((wnaf[pos - 1] == 1 && wnaf[pos - 2] < 0) || (wnaf[pos - 1] == -1 && wnaf[pos - 2] > 0))) {
if (wnaf[pos - 1] == 1) {
wnaf[pos - 2] += 1 << w;
wnaf[pos - 2] -= 1 << w;
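The even-digit fix-up quoted above preserves the encoded value: raising the digit at position pos by one adds 2^(w*pos), while subtracting 1 << w from the digit at position pos-1 removes 2^w * 2^(w*(pos-1)) = 2^(w*pos), so the two adjustments cancel and only the parity of the digit at pos changes.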
size_t n_wnaf = WNAF_SIZE(bucket_window+1);
for (np = 0; np < num; ++np) {
for (i = n_wnaf - 1; i >= 0; i--) {
for (np = 0; np < no; ++np) {
int n = state->wnaf_na[np*n_wnaf + i];
int skew = point_state.skew_na;
for(j = 0; j < bucket_window; j++) {
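The pippenger_wnaf fragments above implement the bucket method: for each digit window, every point is added into the bucket selected by its digit, and a running-sum pass over the buckets then yields sum(digit_i * point_i) using only additions. A small plain-integer analogue of one such window (integers stand in for group elements; not library code):

#include <stdio.h>

int main(void) {
    /* digits in [1, 8] select one of 8 buckets; integers stand in for points */
    int digits[4] = { 3, 1, 3, 7 };
    int points[4] = { 10, 20, 30, 40 };
    long buckets[8] = { 0 };
    long running = 0, result = 0;
    int i, j;

    for (i = 0; i < 4; i++) {
        buckets[digits[i] - 1] += points[i];  /* bucket b collects points with digit b+1 */
    }
    for (j = 7; j >= 0; j--) {                /* highest bucket first */
        running += buckets[j];                /* running = sum of points with digit > j */
        result += running;                    /* each pass contributes one more multiple */
    }
    printf("%ld\n", result);                  /* 3*10 + 1*20 + 3*30 + 7*40 = 420 */
    return 0;
}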
} else if (n <= 20) {
} else if (n <= 57) {
} else if (n <= 136) {
} else if (n <= 235) {
} else if (n <= 1260) {
} else if (n <= 4420) {
} else if (n <= 7880) {
} else if (n <= 16050) {
switch(bucket_window) {
case 10: return 7880;
case 11: return 16050;
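Reading the two functions together (and assuming the omitted branch bodies return the window sizes that the inverse's switch cases pair them with): a batch of, say, 9000 points falls in the (7880, 16050] band of the chain above, so bucket_window returns 11, and bucket_window_inv(11) gives back 16050, the largest batch size for which window 11 is still reported as optimal.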
size_t entries = 2*n_points + 2;
size_t entries = 2*n_points + 2;
size_t point_idx = 0;
if (inp_g_sc == NULL && n_points == 0) {
if (points == NULL || scalars == NULL || state_space == NULL) {
if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) {
if (inp_g_sc != NULL) {
scalars[0] = *inp_g_sc;
while (point_idx < n_points) {
if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) {
size_t space_for_points;
size_t space_overhead;
entry_size = 2*entry_size;
if (space_overhead > max_alloc) {
space_for_points = max_alloc - space_overhead;
n_points = space_for_points/entry_size;
n_points = n_points > max_points ? max_points : n_points;
if (n_points > res) {
if (n_points < max_points) {
for (point_idx = 0; point_idx < n_points; point_idx++) {
if (!cb(&scalar, &point, point_idx, cbdata)) {
if (max_n_batch_points == 0) {
*n_batches = CEIL_DIV(n, max_n_batch_points);
*n_batch_points = CEIL_DIV(n, *n_batches);
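A worked example of the two CEIL_DIV lines above, with illustrative values: splitting n = 10000 points under a cap of max_n_batch_points = 4420 first rounds up to 3 batches, then evens the batches out to CEIL_DIV(10000, 3) = 3334 points each, so no batch exceeds the cap and the last one is not left nearly empty.

#include <stddef.h>
#include <stdio.h>

#define CEIL_DIV(x, y) (((x) + (y) - 1) / (y))   /* assumed, as in util.h */

int main(void) {
    size_t n = 10000, max_n_batch_points = 4420;          /* illustrative values */
    size_t n_batches = CEIL_DIV(n, max_n_batch_points);   /* 3 */
    size_t n_batch_points = CEIL_DIV(n, n_batches);       /* 3334: batches evened out */
    printf("%zu batches of at most %zu points\n", n_batches, n_batch_points);
    return 0;
}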
size_t n_batch_points;
if (inp_g_sc == NULL && n == 0) {
if (scratch == NULL) {
for(i = 0; i < n_batches; i++) {
size_t nbp = n < n_batch_points ? n : n_batch_points;
size_t offset = n_batch_points*i;
if (!f(error_callback, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
#define ECMULT_TABLE_SIZE(w)
The number of entries a table with precomputed multiples needs to have.
typedef int (secp256k1_ecmult_multi_callback)(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data)
#define STRAUSS_SCRATCH_OBJECTS
static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window)
Returns the maximum optimal number of points for a bucket_window.
static size_t secp256k1_pippenger_max_points(const secp256k1_callback *error_callback, secp256k1_scratch *scratch)
Returns the maximum number of points in addition to G that can be used with a given scratch space.
static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset)
static int secp256k1_ecmult_strauss_batch_single(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n)
static size_t secp256k1_strauss_max_points(const secp256k1_callback *error_callback, secp256k1_scratch *scratch)
static int secp256k1_wnaf_fixed(int *wnaf, const secp256k1_scalar *s, int w)
Convert a number to WNAF notation.
static SECP256K1_INLINE void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, secp256k1_scalar *s2, secp256k1_ge *p1, secp256k1_ge *p2)
static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, int w)
Convert a number to WNAF notation.
static SECP256K1_INLINE void secp256k1_ecmult_table_get_ge_storage(secp256k1_ge *r, const secp256k1_ge_storage *pre, int n, int w)
static int secp256k1_ecmult_multi_var(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n)
static SECP256K1_INLINE void secp256k1_ecmult_table_get_ge_lambda(secp256k1_ge *r, const secp256k1_ge *pre, const secp256k1_fe *x, int n, int w)
static size_t secp256k1_strauss_scratch_size(size_t n_points)
#define ECMULT_PIPPENGER_THRESHOLD
static int secp256k1_ecmult_multi_simple_var(secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points)
static int secp256k1_pippenger_bucket_window(size_t n)
Returns optimal bucket_window (number of bits of a scalar represented by a set of buckets) for a given number of points.
static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n)
#define WNAF_BITS
Larger values for ECMULT_WINDOW_SIZE result in possibly better performance at the cost of an exponentially larger precomputed table.
#define ECMULT_MAX_POINTS_PER_BATCH
#define PIPPENGER_MAX_BUCKET_WINDOW
#define PIPPENGER_SCRATCH_OBJECTS
static int secp256k1_ecmult_strauss_batch(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset)
static void secp256k1_ecmult(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng)
static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n)
static SECP256K1_INLINE void secp256k1_ecmult_table_verify(int n, int w)
static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_window, struct secp256k1_pippenger_state *state, secp256k1_gej *r, const secp256k1_scalar *sc, const secp256k1_ge *pt, size_t num)
static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window)
Returns the scratch size required for a given number of points (excluding base point G) without considering alignment.
static SECP256K1_INLINE void secp256k1_ecmult_table_get_ge(secp256k1_ge *r, const secp256k1_ge *pre, int n, int w)
static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_ge *pre_a, secp256k1_fe *zr, secp256k1_fe *z, const secp256k1_gej *a)
Fill a table 'pre_a' with precomputed odd multiples of a.
static void secp256k1_ecmult_strauss_wnaf(const struct secp256k1_strauss_state *state, secp256k1_gej *r, size_t num, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng)
int(* secp256k1_ecmult_multi_func)(const secp256k1_callback *error_callback, secp256k1_scratch *, secp256k1_gej *, const secp256k1_scalar *, secp256k1_ecmult_multi_callback cb, void *, size_t)
#define secp256k1_fe_negate(r, a, m)
Negate a field element.
static const secp256k1_fe secp256k1_const_beta
#define secp256k1_fe_set_int
static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr)
Set r equal to the double of a.
static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv)
Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv).
static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a)
Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast.
static void secp256k1_gej_set_infinity(secp256k1_gej *r)
Set a group element (jacobian) equal to the point at infinity.
static int secp256k1_gej_is_infinity(const secp256k1_gej *a)
Check whether a group element is the point at infinity.
static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y)
Set a group element equal to the point with given X and Y coordinates.
static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr)
Set r equal to the sum of a and b (with b given in affine coordinates).
static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storage *a)
Convert a group element back from the storage type.
static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr)
Set r equal to the sum of a and b.
static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *b)
Rescale a jacobian point by b which must be non-zero.
static void secp256k1_ge_table_set_globalz(size_t len, secp256k1_ge *a, const secp256k1_fe *zr)
Bring a batch of inputs to the same global z "denominator", based on ratios between (omitted) z coordinates of adjacent elements.
static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a)
Set r equal to the inverse of a (i.e., mirrored around the X axis)
static int secp256k1_ge_is_infinity(const secp256k1_ge *a)
Check whether a group element is the point at infinity.
static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a)
Set a group element (jacobian) equal to another which is given in affine coordinates.
static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi)
static const secp256k1_ge secp256k1_ge_const_g
static int sign(const secp256k1_context *ctx, struct signer_secrets *signer_secrets, struct signer *signer, const secp256k1_musig_keyagg_cache *cache, const unsigned char *msg32, unsigned char *sig64)
const secp256k1_ge_storage secp256k1_pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)]
const secp256k1_ge_storage secp256k1_pre_g[ECMULT_TABLE_SIZE(WINDOW_G)]
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k)
Find r1 and r2 such that r1+r2*2^128 = k (a small numeric illustration of this split follows at the end of this list).
static int secp256k1_scalar_is_even(const secp256k1_scalar *a)
Check whether a scalar, considered as a nonnegative integer, is even.
static int secp256k1_scalar_is_zero(const secp256k1_scalar *a)
Check whether a scalar equals zero.
static uint32_t secp256k1_scalar_get_bits_limb32(const secp256k1_scalar *a, unsigned int offset, unsigned int count)
Access bits (1 < count <= 32) from a scalar.
static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a)
Compute the complement of a scalar (modulo the group order).
static int secp256k1_scalar_is_high(const secp256k1_scalar *a)
Check whether a scalar is higher than the group order divided by 2.
static void secp256k1_scalar_split_lambda(secp256k1_scalar *SECP256K1_RESTRICT r1, secp256k1_scalar *SECP256K1_RESTRICT r2, const secp256k1_scalar *SECP256K1_RESTRICT k)
Find r1 and r2 such that r1+r2*lambda = k, where r1 and r2 or their negations are maximum 128 bits long.
static uint32_t secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count)
Access bits (1 < count <= 32) from a scalar.
static const secp256k1_scalar secp256k1_scalar_zero
static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, size_t checkpoint)
Applies a check point received from secp256k1_scratch_checkpoint, undoing all allocations since that point.
static size_t secp256k1_scratch_max_allocation(const secp256k1_callback *error_callback, const secp256k1_scratch *scratch, size_t n_objects)
Returns the maximum allocation the scratch space will allow.
static void * secp256k1_scratch_alloc(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, size_t n)
Returns a pointer into the most recently allocated frame, or NULL if there is insufficient available space.
static size_t secp256k1_scratch_checkpoint(const secp256k1_callback *error_callback, const secp256k1_scratch *scratch)
Returns an opaque object used to "checkpoint" a scratch space.
#define VERIFY_CHECK(cond)
This field implementation represents the value as 10 uint32_t limbs in base 2^26.
A group element in affine coordinates on the secp256k1 curve, or occasionally on an isomorphic curve ...
A group element of the secp256k1 curve, in jacobian coordinates.
struct secp256k1_pippenger_point_state * ps
A scalar modulo the group order of the secp256k1 curve.
struct secp256k1_strauss_point_state * ps
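To make the split identities referenced above concrete (secp256k1_scalar_split_128 and secp256k1_scalar_split_lambda), here is a narrow-width analogue: splitting a 64-bit value at bit 32 plays the same role as splitting a 256-bit scalar at bit 128, so that r1 + r2*2^128 = k. The lambda split follows the same pattern except the identity is r1 + r2*lambda = k modulo the group order. Standalone sketch, not library code:

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t k  = 0x0123456789abcdefULL;
    uint64_t r1 = k & 0xffffffffULL;   /* low 32 bits: analogue of r1 = k mod 2^128 */
    uint64_t r2 = k >> 32;             /* high 32 bits: analogue of r2 = floor(k / 2^128) */
    printf("identity holds: %d\n", k == r1 + (r2 << 32));  /* prints 1 */
    return 0;
}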