// Fixed-width-style integer aliases used by the djb ChaCha reference code.
// `unsigned int` is assumed to be at least 32 bits; U32V masks down to
// exactly 32 bits wherever that assumption matters.
using u32 = unsigned int;
using u8 = unsigned char;
/* Truncate a value to 8 bits. U8C/U32C are integer-constant width helpers
 * defined elsewhere in this file (not visible in this chunk). */
#define U8V(v) ((u8)(v)&U8C(0xFF))
/* Truncate a value to 32 bits; keeps results well-defined even if the
 * intermediate arithmetic promoted to a wider type. */
#define U32V(v) ((u32)(v)&U32C(0xFFFFFFFF))
/* Rotate a 32-bit value left by n bits.
 * NOTE(review): n == 0 would evaluate (v) >> 32, which is undefined for a
 * 32-bit operand. The ChaCha rounds below only use n in {7, 8, 12, 16},
 * so this is safe as used — do not reuse with n == 0. */
#define ROTL32(v, n) (U32V((v) << (n)) | ((v) >> (32 - (n))))
/* Assemble a 32-bit little-endian word from four consecutive bytes at p.
 * Each byte is widened to u32 before shifting so no high bits are lost. */
#define U8TO32_LITTLE(p)                           \
  (((u32)((p)[3]) << 24) | ((u32)((p)[2]) << 16) | \
   ((u32)((p)[1]) << 8) | ((u32)((p)[0])))
35#define U32TO8_LITTLE(p, v) \
38 (p)[1] = U8V((v) >> 8); \
39 (p)[2] = U8V((v) >> 16); \
40 (p)[3] = U8V((v) >> 24); \
/* Thin aliases used by the ChaCha quarter round below. PLUS masks the sum
 * to 32 bits via U32V so wrap-around addition behaves identically on
 * platforms where u32 is wider than 32 bits. */
#define ROTATE(v, c) (ROTL32(v, c))
#define XOR(v, w) ((v) ^ (w))
#define PLUS(v, w) (U32V((v) + (w)))
#define PLUSONE(v) (PLUS((v), 1))
/* One ChaCha quarter round on the four state words a, b, c, d, using the
 * specified rotation distances 16, 12, 8, 7 (add / xor / rotate).
 * NOTE(review): this multi-statement macro is deliberately NOT wrapped in
 * do { ... } while (0); call sites (not visible in this chunk) presumably
 * invoke it without a trailing semicolon, so wrapping it would require
 * updating every caller — confirm before changing. */
#define QUARTERROUND(a, b, c, d) \
  a = PLUS(a, b); d = ROTATE(XOR(d, a), 16); \
  c = PLUS(c, d); b = ROTATE(XOR(b, c), 12); \
  a = PLUS(a, b); d = ROTATE(XOR(d, a), 8); \
  c = PLUS(c, d); b = ROTATE(XOR(b, c), 7);
/* 16-character ASCII constants loaded into ChaCha state words 0..3:
 * sigma ("expand 32-byte k") for 256-bit keys, tau for 128-bit keys.
 * Only the first 16 bytes are used; the trailing NUL is not part of the
 * cipher state. Do not alter these strings — they are part of the
 * algorithm definition. */
static const char sigma[] = "expand 32-byte k";
static const char tau[] = "expand 16-byte k";
97 const char* constants;
129 u32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
130 u32 j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
131 u8* ctarget =
nullptr;
156 for (i = 0; i < bytes; ++i)
178 for (i = 20; i > 0; i -= 2) {
198 x10 =
PLUS(x10, j10);
199 x11 =
PLUS(x11, j11);
200 x12 =
PLUS(x12, j12);
201 x13 =
PLUS(x13, j13);
202 x14 =
PLUS(x14, j14);
203 x15 =
PLUS(x15, j15);
247 for (i = 0; i < bytes; ++i)
263 for (i = 0; i < bytes; ++i)
279 static const uint8_t iv[8] = {0, 0, 0, 0, 0, 0, 0, 0};
286 fuzzed_data_provider,
294 uint8_t iv[8] = {0, 0, 0, 0, 0, 0, 0, 0};
298 uint32_t iv_prefix = fuzzed_data_provider.ConsumeIntegral<uint32_t>();
299 uint64_t iv = fuzzed_data_provider.ConsumeIntegral<uint64_t>();
300 nonce = {iv_prefix, iv};
301 counter = fuzzed_data_provider.ConsumeIntegral<uint32_t>();
302 chacha20.Seek(
nonce, counter);
303 ctx.
input[12] = counter;
304 ctx.
input[13] = iv_prefix;
306 ctx.
input[15] = iv >> 32;
309 uint32_t integralInRange = fuzzed_data_provider.ConsumeIntegralInRange<
size_t>(0, 4096);
310 std::vector<uint8_t> output(integralInRange);
312 std::vector<uint8_t> djb_output(integralInRange);
314 assert(output == djb_output);
316 uint32_t old_counter = counter;
317 counter += (integralInRange + 63) >> 6;
318 if (counter < old_counter) ++
nonce.first;
319 if (integralInRange & 63) {
320 chacha20.Seek(
nonce, counter);
325 uint32_t integralInRange = fuzzed_data_provider.ConsumeIntegralInRange<
size_t>(0, 4096);
326 std::vector<uint8_t> output(integralInRange);
329 std::vector<uint8_t> djb_output(integralInRange);
331 assert(output == djb_output);
333 uint32_t old_counter = counter;
334 counter += (integralInRange + 63) >> 6;
335 if (counter < old_counter) ++
nonce.first;
336 if (integralInRange & 63) {
337 chacha20.Seek(
nonce, counter);
Unrestricted ChaCha20 cipher.
ChaCha20Aligned::Nonce96 Nonce96
96-bit nonce type.
static const char sigma[]
FUZZ_TARGET(crypto_diff_fuzz_chacha20)
void ECRYPT_encrypt_bytes(ECRYPT_ctx *ctx, const u8 *plaintext, u8 *ciphertext, u32 msglen)
void ECRYPT_ivsetup(ECRYPT_ctx *ctx, const u8 *iv)
void ECRYPT_keystream_bytes(ECRYPT_ctx *ctx, u8 *keystream, u32 length)
void ECRYPT_keysetup(ECRYPT_ctx *ctx, const u8 *key, u32 keysize, u32 ivsize)
#define U32TO8_LITTLE(p, v)
#define QUARTERROUND(a, b, c, d)
#define LIMITED_WHILE(condition, limit)
Can be used to limit a theoretically unbounded loop.
Span< const std::byte > MakeByteSpan(V &&v) noexcept
Span< std::byte > MakeWritableByteSpan(V &&v) noexcept
std::vector< B > ConsumeFixedLengthByteVector(FuzzedDataProvider &fuzzed_data_provider, const size_t length) noexcept
Returns a byte vector of specified size regardless of the number of remaining bytes available from the fuzzed data provider.
size_t CallOneOf(FuzzedDataProvider &fuzzed_data_provider, Callables... callables)