/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Test cases for hash functions, including a benchmark. This is included by
 * KUnit test suites that want to use it. See sha512_kunit.c for an example.
 *
 * Copyright 2025 Google LLC
 */
#include <kunit/run-in-irq-context.h>
#include <kunit/test.h>
#include <linux/vmalloc.h>

/* test_buf is a guarded buffer, i.e. &test_buf[TEST_BUF_LEN] is not mapped. */
#define TEST_BUF_LEN 16384
static u8 *test_buf;

static u8 *orig_test_buf;

static u64 random_seed;

/*
 * This is a simple linear congruential generator. It is used only for testing,
 * which does not require cryptographically secure random numbers. A hard-coded
 * algorithm is used instead of <linux/prandom.h> so that it matches the
 * algorithm used by the test vector generation script. This allows the input
 * data in random test vectors to be concisely stored as just the seed.
 */
static u32 rand32(void)
{
	random_seed = (random_seed * 25214903917 + 11) & ((1ULL << 48) - 1);
	return random_seed >> 16;
}
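
/*
 * Note: the multiplier 25214903917 (0x5DEECE66D), increment 11, and 48-bit
 * modulus above are the same constants used by java.util.Random's LCG, and
 * rand32() returns bits 16..47 of the state, so the sequence is easy to
 * reproduce outside the kernel when regenerating test vectors.
 */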

static void rand_bytes(u8 *out, size_t len)
{
	for (size_t i = 0; i < len; i++)
		out[i] = rand32();
}

static void rand_bytes_seeded_from_len(u8 *out, size_t len)
{
	random_seed = len;
	rand_bytes(out, len);
}

static bool rand_bool(void)
{
	return rand32() % 2;
}

/* Generate a random length, preferring small lengths. */
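/*
 * (Roughly 1/3 of the generated lengths are below 128, 1/3 below 3072, and
 * 1/3 uniform over the full u32 range, before the final reduction modulo
 * (max_len + 1).)
 */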
static size_t rand_length(size_t max_len)
{
	size_t len;

	switch (rand32() % 3) {
	case 0:
		len = rand32() % 128;
		break;
	case 1:
		len = rand32() % 3072;
		break;
	default:
		len = rand32();
		break;
	}
	return len % (max_len + 1);
}

static size_t rand_offset(size_t max_offset)
{
	return min(rand32() % 128, max_offset);
}

static int hash_suite_init(struct kunit_suite *suite)
{
	/*
	 * Allocate the test buffer using vmalloc() with a page-aligned length
	 * so that it is immediately followed by a guard page. This allows
	 * buffer overreads to be detected, even in assembly code.
	 */
	size_t alloc_len = round_up(TEST_BUF_LEN, PAGE_SIZE);

	orig_test_buf = vmalloc(alloc_len);
	if (!orig_test_buf)
		return -ENOMEM;

	test_buf = orig_test_buf + alloc_len - TEST_BUF_LEN;
	return 0;
}
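/*
 * Resulting layout (sketch): test_buf occupies the last TEST_BUF_LEN bytes of
 * the vmalloc allocation, so &test_buf[TEST_BUF_LEN] lands exactly on the
 * unmapped guard page that vmalloc places after the allocation:
 *
 *   orig_test_buf ... test_buf [ TEST_BUF_LEN bytes ] | guard page (unmapped)
 */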

static void hash_suite_exit(struct kunit_suite *suite)
{
	vfree(orig_test_buf);
	orig_test_buf = NULL;
	test_buf = NULL;
}

/*
 * Test the hash function against a list of test vectors.
 *
 * Note that it's only necessary to run each test vector in one way (e.g.,
 * one-shot instead of incremental), since consistency between different ways of
 * using the APIs is verified by other test cases.
 */
static void test_hash_test_vectors(struct kunit *test)
{
	for (size_t i = 0; i < ARRAY_SIZE(hash_testvecs); i++) {
		size_t data_len = hash_testvecs[i].data_len;
		u8 actual_hash[HASH_SIZE];

		KUNIT_ASSERT_LE(test, data_len, TEST_BUF_LEN);
		rand_bytes_seeded_from_len(test_buf, data_len);

		HASH(test_buf, data_len, actual_hash);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, actual_hash, hash_testvecs[i].digest, HASH_SIZE,
			"Wrong result with test vector %zu; data_len=%zu", i,
			data_len);
	}
}

/*
 * Test that the hash function produces correct results for *every* length up to
 * 4096 bytes. To do this, generate seeded random data, then calculate a hash
 * value for each length 0..4096, then hash the hash values. Verify just the
 * final hash value, which should match only when all hash values were correct.
 */
static void test_hash_all_lens_up_to_4096(struct kunit *test)
{
	struct HASH_CTX ctx;
	u8 hash[HASH_SIZE];

	static_assert(TEST_BUF_LEN >= 4096);
	rand_bytes_seeded_from_len(test_buf, 4096);
	HASH_INIT(&ctx);
	for (size_t len = 0; len <= 4096; len++) {
		HASH(test_buf, len, hash);
		HASH_UPDATE(&ctx, hash, HASH_SIZE);
	}
	HASH_FINAL(&ctx, hash);
	KUNIT_ASSERT_MEMEQ(test, hash, hash_testvec_consolidated, HASH_SIZE);
}

/*
 * Test that the hash function produces the same result with a one-shot
 * computation as it does with an incremental computation.
 */
static void test_hash_incremental_updates(struct kunit *test)
{
	for (int i = 0; i < 1000; i++) {
		size_t total_len, offset;
		struct HASH_CTX ctx;
		u8 hash1[HASH_SIZE];
		u8 hash2[HASH_SIZE];
		size_t num_parts = 0;
		size_t remaining_len, cur_offset;

		total_len = rand_length(TEST_BUF_LEN);
		offset = rand_offset(TEST_BUF_LEN - total_len);
		rand_bytes(&test_buf[offset], total_len);

		/* Compute the hash value in one shot. */
		HASH(&test_buf[offset], total_len, hash1);

		/*
		 * Compute the hash value incrementally, using a randomly
		 * selected sequence of update lengths that sum to total_len.
		 */
		HASH_INIT(&ctx);
		remaining_len = total_len;
		cur_offset = offset;
		while (rand_bool()) {
			size_t part_len = rand_length(remaining_len);

			HASH_UPDATE(&ctx, &test_buf[cur_offset], part_len);
			num_parts++;
			cur_offset += part_len;
			remaining_len -= part_len;
		}
		if (remaining_len != 0 || rand_bool()) {
			HASH_UPDATE(&ctx, &test_buf[cur_offset], remaining_len);
			num_parts++;
		}
		HASH_FINAL(&ctx, hash2);

		/* Verify that the two hash values are the same. */
		KUNIT_ASSERT_MEMEQ_MSG(
			test, hash1, hash2, HASH_SIZE,
			"Incremental test failed with total_len=%zu num_parts=%zu offset=%zu",
			total_len, num_parts, offset);
	}
}

/*
 * Test that the hash function does not overrun any buffers. Uses a guard page
 * to catch buffer overruns even if they occur in assembly code.
 */
static void test_hash_buffer_overruns(struct kunit *test)
{
	const size_t max_tested_len = TEST_BUF_LEN - sizeof(struct HASH_CTX);
	void *const buf_end = &test_buf[TEST_BUF_LEN];
	struct HASH_CTX *guarded_ctx = buf_end - sizeof(*guarded_ctx);

	rand_bytes(test_buf, TEST_BUF_LEN);

	for (int i = 0; i < 100; i++) {
		size_t len = rand_length(max_tested_len);
		struct HASH_CTX ctx;
		u8 hash[HASH_SIZE];

		/* Check for overruns of the data buffer. */
		HASH(buf_end - len, len, hash);
		HASH_INIT(&ctx);
		HASH_UPDATE(&ctx, buf_end - len, len);
		HASH_FINAL(&ctx, hash);

		/* Check for overruns of the hash value buffer. */
		HASH(test_buf, len, buf_end - HASH_SIZE);
		HASH_INIT(&ctx);
		HASH_UPDATE(&ctx, test_buf, len);
		HASH_FINAL(&ctx, buf_end - HASH_SIZE);

		/* Check for overruns of the hash context. */
		HASH_INIT(guarded_ctx);
		HASH_UPDATE(guarded_ctx, test_buf, len);
		HASH_FINAL(guarded_ctx, hash);
	}
}

/*
 * Test that the caller is permitted to alias the output digest and source data
 * buffer, and also modify the source data buffer after it has been used.
 */
static void test_hash_overlaps(struct kunit *test)
{
	const size_t max_tested_len = TEST_BUF_LEN - HASH_SIZE;
	struct HASH_CTX ctx;
	u8 hash[HASH_SIZE];

	rand_bytes(test_buf, TEST_BUF_LEN);

	for (int i = 0; i < 100; i++) {
		size_t len = rand_length(max_tested_len);
		size_t offset = HASH_SIZE + rand_offset(max_tested_len - len);
		bool left_end = rand_bool();
		u8 *ovl_hash = left_end ? &test_buf[offset] :
					  &test_buf[offset + len - HASH_SIZE];

		HASH(&test_buf[offset], len, hash);
		HASH(&test_buf[offset], len, ovl_hash);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, hash, ovl_hash, HASH_SIZE,
			"Overlap test 1 failed with len=%zu offset=%zu left_end=%d",
			len, offset, left_end);

		/* Repeat the above test, but this time use init+update+final */
		HASH(&test_buf[offset], len, hash);
		HASH_INIT(&ctx);
		HASH_UPDATE(&ctx, &test_buf[offset], len);
		HASH_FINAL(&ctx, ovl_hash);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, hash, ovl_hash, HASH_SIZE,
			"Overlap test 2 failed with len=%zu offset=%zu left_end=%d",
			len, offset, left_end);

		/* Test modifying the source data after it was used. */
		HASH(&test_buf[offset], len, hash);
		HASH_INIT(&ctx);
		HASH_UPDATE(&ctx, &test_buf[offset], len);
		rand_bytes(&test_buf[offset], len);
		HASH_FINAL(&ctx, ovl_hash);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, hash, ovl_hash, HASH_SIZE,
			"Overlap test 3 failed with len=%zu offset=%zu left_end=%d",
			len, offset, left_end);
	}
}

/*
 * Test that if the same data is hashed at different alignments in memory, the
 * results are the same.
 */
static void test_hash_alignment_consistency(struct kunit *test)
{
	u8 hash1[128 + HASH_SIZE];
	u8 hash2[128 + HASH_SIZE];

	for (int i = 0; i < 100; i++) {
		size_t len = rand_length(TEST_BUF_LEN);
		size_t data_offs1 = rand_offset(TEST_BUF_LEN - len);
		size_t data_offs2 = rand_offset(TEST_BUF_LEN - len);
		size_t hash_offs1 = rand_offset(128);
		size_t hash_offs2 = rand_offset(128);

		rand_bytes(&test_buf[data_offs1], len);
		HASH(&test_buf[data_offs1], len, &hash1[hash_offs1]);
		memmove(&test_buf[data_offs2], &test_buf[data_offs1], len);
		HASH(&test_buf[data_offs2], len, &hash2[hash_offs2]);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, &hash1[hash_offs1], &hash2[hash_offs2], HASH_SIZE,
			"Alignment consistency test failed with len=%zu data_offs=(%zu,%zu) hash_offs=(%zu,%zu)",
			len, data_offs1, data_offs2, hash_offs1, hash_offs2);
	}
}

/* Test that HASH_FINAL zeroizes the context. */
static void test_hash_ctx_zeroization(struct kunit *test)
{
	static const u8 zeroes[sizeof(struct HASH_CTX)];
	struct HASH_CTX ctx;

	rand_bytes(test_buf, 128);
	HASH_INIT(&ctx);
	HASH_UPDATE(&ctx, test_buf, 128);
	HASH_FINAL(&ctx, test_buf);
	KUNIT_ASSERT_MEMEQ_MSG(test, &ctx, zeroes, sizeof(ctx),
			       "Hash context was not zeroized by finalization");
}

#define IRQ_TEST_DATA_LEN 256
#define IRQ_TEST_NUM_BUFFERS 3 /* matches max concurrency level */

struct hash_irq_test1_state {
	u8 expected_hashes[IRQ_TEST_NUM_BUFFERS][HASH_SIZE];
	atomic_t seqno;
};

/*
 * Compute the hash of one of the test messages and verify that it matches the
 * expected hash from @state->expected_hashes. To increase the chance of
 * detecting problems, cycle through multiple messages.
 */
static bool hash_irq_test1_func(void *state_)
{
	struct hash_irq_test1_state *state = state_;
	u32 i = (u32)atomic_inc_return(&state->seqno) % IRQ_TEST_NUM_BUFFERS;
	u8 actual_hash[HASH_SIZE];

	HASH(&test_buf[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN, actual_hash);
	return memcmp(actual_hash, state->expected_hashes[i], HASH_SIZE) == 0;
}

/*
 * Test that if hashes are computed in task, softirq, and hardirq context
 * concurrently, then all results are as expected.
 */
static void test_hash_interrupt_context_1(struct kunit *test)
{
	struct hash_irq_test1_state state = {};

	/* Prepare some test messages and compute the expected hash of each. */
	rand_bytes(test_buf, IRQ_TEST_NUM_BUFFERS * IRQ_TEST_DATA_LEN);
	for (int i = 0; i < IRQ_TEST_NUM_BUFFERS; i++)
		HASH(&test_buf[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN,
		     state.expected_hashes[i]);

	kunit_run_irq_test(test, hash_irq_test1_func, 100000, &state);
}

struct hash_irq_test2_hash_ctx {
	struct HASH_CTX hash_ctx;
	atomic_t in_use;
	int offset;
	int step;
};

struct hash_irq_test2_state {
	struct hash_irq_test2_hash_ctx ctxs[IRQ_TEST_NUM_BUFFERS];
	u8 expected_hash[HASH_SIZE];
	u16 update_lens[32];
	int num_steps;
};

static bool hash_irq_test2_func(void *state_)
{
	struct hash_irq_test2_state *state = state_;
	struct hash_irq_test2_hash_ctx *ctx;
	bool ret = true;

	for (ctx = &state->ctxs[0]; ctx < &state->ctxs[ARRAY_SIZE(state->ctxs)];
	     ctx++) {
		if (atomic_cmpxchg(&ctx->in_use, 0, 1) == 0)
			break;
	}
	if (WARN_ON_ONCE(ctx == &state->ctxs[ARRAY_SIZE(state->ctxs)])) {
		/*
		 * This should never happen, as the number of contexts is equal
		 * to the maximum concurrency level of kunit_run_irq_test().
		 */
		return false;
	}

	if (ctx->step == 0) {
		/* Init step */
		HASH_INIT(&ctx->hash_ctx);
		ctx->offset = 0;
		ctx->step++;
	} else if (ctx->step < state->num_steps - 1) {
		/* Update step */
		HASH_UPDATE(&ctx->hash_ctx, &test_buf[ctx->offset],
			    state->update_lens[ctx->step - 1]);
		ctx->offset += state->update_lens[ctx->step - 1];
		ctx->step++;
	} else {
		/* Final step */
		u8 actual_hash[HASH_SIZE];

		if (WARN_ON_ONCE(ctx->offset != TEST_BUF_LEN))
			ret = false;
		HASH_FINAL(&ctx->hash_ctx, actual_hash);
		if (memcmp(actual_hash, state->expected_hash, HASH_SIZE) != 0)
			ret = false;
		ctx->step = 0;
	}
	atomic_set_release(&ctx->in_use, 0);
	return ret;
}

/*
 * Test that if hashes are computed in task, softirq, and hardirq context
 * concurrently, *including doing different parts of the same incremental
 * computation in different contexts*, then all results are as expected.
 * Besides detecting bugs similar to those that test_hash_interrupt_context_1
 * can detect, this test case can also detect bugs where hash function
 * implementations don't correctly handle these mixed incremental computations.
 */
static void test_hash_interrupt_context_2(struct kunit *test)
{
	struct hash_irq_test2_state *state;
	int remaining = TEST_BUF_LEN;

	state = kunit_kzalloc(test, sizeof(*state), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, state);

	rand_bytes(test_buf, TEST_BUF_LEN);
	HASH(test_buf, TEST_BUF_LEN, state->expected_hash);

	/*
	 * Generate a list of update lengths to use. Ensure that it contains
	 * multiple entries but is limited to a maximum length.
	 */
	static_assert(TEST_BUF_LEN / 4096 > 1);
	for (state->num_steps = 0;
	     state->num_steps < ARRAY_SIZE(state->update_lens) - 1 && remaining;
	     state->num_steps++) {
		state->update_lens[state->num_steps] =
			rand_length(min(remaining, 4096));
		remaining -= state->update_lens[state->num_steps];
	}
	if (remaining)
		state->update_lens[state->num_steps++] = remaining;
	state->num_steps += 2; /* for init and final */

	kunit_run_irq_test(test, hash_irq_test2_func, 250000, state);
}

#define UNKEYED_HASH_KUNIT_CASES \
	KUNIT_CASE(test_hash_test_vectors), \
	KUNIT_CASE(test_hash_all_lens_up_to_4096), \
	KUNIT_CASE(test_hash_incremental_updates), \
	KUNIT_CASE(test_hash_buffer_overruns), \
	KUNIT_CASE(test_hash_overlaps), \
	KUNIT_CASE(test_hash_alignment_consistency), \
	KUNIT_CASE(test_hash_ctx_zeroization), \
	KUNIT_CASE(test_hash_interrupt_context_1), \
	KUNIT_CASE(test_hash_interrupt_context_2)
/* benchmark_hash is omitted so that the suites can put it last. */

#ifdef HMAC
/*
 * Test the corresponding HMAC variant.
 *
 * This test case is fairly short, since HMAC is just a simple C wrapper around
 * the underlying unkeyed hash function, which is already well-tested by the
 * other test cases. It's not useful to test things like data alignment or
 * interrupt context again for HMAC, nor to have a long list of test vectors.
 *
 * Thus, just do a single consolidated test, which covers all data lengths up to
 * 4096 bytes and all key lengths up to 292 bytes. For each data length, select
 * a key length, generate the inputs from a seed, and compute the HMAC value.
 * Concatenate all these HMAC values together, and compute the HMAC of that.
 * Verify that value. If this fails, then the HMAC implementation is wrong.
 * This won't show which specific input failed, but that should be fine. Any
 * failure would likely be non-input-specific or also show in the unkeyed tests.
 */
static void test_hmac(struct kunit *test)
{
	static const u8 zeroes[sizeof(struct HMAC_CTX)];
	u8 *raw_key;
	struct HMAC_KEY key;
	struct HMAC_CTX ctx;
	u8 mac[HASH_SIZE];
	u8 mac2[HASH_SIZE];

	static_assert(TEST_BUF_LEN >= 4096 + 293);
	rand_bytes_seeded_from_len(test_buf, 4096);
	raw_key = &test_buf[4096];

	rand_bytes_seeded_from_len(raw_key, 32);
	HMAC_PREPAREKEY(&key, raw_key, 32);
	HMAC_INIT(&ctx, &key);
	for (size_t data_len = 0; data_len <= 4096; data_len++) {
		/*
		 * Cycle through key lengths as well. Somewhat arbitrarily go
		 * up to 293, which is somewhat larger than the largest hash
		 * block size (which is the size at which the key starts being
		 * hashed down to one block); going higher would not be useful.
		 * To reduce correlation with data_len, use a prime number here.
		 */
		size_t key_len = data_len % 293;

		HMAC_UPDATE(&ctx, test_buf, data_len);

		rand_bytes_seeded_from_len(raw_key, key_len);
		HMAC_USINGRAWKEY(raw_key, key_len, test_buf, data_len, mac);
		HMAC_UPDATE(&ctx, mac, HASH_SIZE);

		/* Verify that HMAC() is consistent with HMAC_USINGRAWKEY(). */
		HMAC_PREPAREKEY(&key, raw_key, key_len);
		HMAC(&key, test_buf, data_len, mac2);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, mac, mac2, HASH_SIZE,
			"HMAC gave different results with raw and prepared keys");
	}
	HMAC_FINAL(&ctx, mac);
	KUNIT_EXPECT_MEMEQ_MSG(test, mac, hmac_testvec_consolidated, HASH_SIZE,
			       "HMAC gave wrong result");
	KUNIT_EXPECT_MEMEQ_MSG(test, &ctx, zeroes, sizeof(ctx),
			       "HMAC context was not zeroized by finalization");
}
#define HASH_KUNIT_CASES UNKEYED_HASH_KUNIT_CASES, KUNIT_CASE(test_hmac)
#else
#define HASH_KUNIT_CASES UNKEYED_HASH_KUNIT_CASES
#endif

/* Benchmark the hash function on various data lengths. */
static void benchmark_hash(struct kunit *test)
{
	static const size_t lens_to_test[] = {
		1, 16, 64, 127, 128, 200, 256,
		511, 512, 1024, 3173, 4096, 16384,
	};
	u8 hash[HASH_SIZE];

	if (!IS_ENABLED(CONFIG_CRYPTO_LIB_BENCHMARK))
		kunit_skip(test, "not enabled");

	/* Warm-up */
	for (size_t i = 0; i < 10000000; i += TEST_BUF_LEN)
		HASH(test_buf, TEST_BUF_LEN, hash);

	for (size_t i = 0; i < ARRAY_SIZE(lens_to_test); i++) {
		size_t len = lens_to_test[i];
		/* The '+ 128' tries to account for per-message overhead. */
		size_t num_iters = 10000000 / (len + 128);
		u64 t;

		KUNIT_ASSERT_LE(test, len, TEST_BUF_LEN);
		preempt_disable();
		t = ktime_get_ns();
		for (size_t j = 0; j < num_iters; j++)
			HASH(test_buf, len, hash);
		t = ktime_get_ns() - t;
		preempt_enable();
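		/*
		 * len * num_iters bytes were hashed in t ns, so the rate is
		 * (len * num_iters) / t bytes per ns; multiplying by 1000
		 * converts that to MB/s (10^6 bytes per second).
		 */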
		kunit_info(test, "len=%zu: %llu MB/s", len,
			   div64_u64((u64)len * num_iters * 1000, t ?: 1));
	}
}
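
/*
 * For reference, a consumer of this template is expected to define the HASH_*
 * macros and test vector arrays before including it, then declare a KUnit
 * suite that uses hash_suite_init/hash_suite_exit and ends with
 * benchmark_hash. The sketch below is illustrative only; the macro values
 * shown are hypothetical placeholders (see sha512_kunit.c for a real consumer):
 *
 *	#define HASH_CTX	hash_ctx	// e.g. the algorithm's ctx struct
 *	#define HASH_SIZE	digest_size	// e.g. the algorithm's digest size
 *	#define HASH		hash_oneshot	// one-shot hash function
 *	#define HASH_INIT	hash_init
 *	#define HASH_UPDATE	hash_update
 *	#define HASH_FINAL	hash_final
 *	// hash_testvecs[] and hash_testvec_consolidated[] must also be
 *	// defined, typically via a header generated by the test vector
 *	// generation script.
 *	#include "hash-test-template.h"
 *
 *	static struct kunit_case hash_test_cases[] = {
 *		HASH_KUNIT_CASES,
 *		KUNIT_CASE(benchmark_hash),
 *		{},
 *	};
 *
 *	static struct kunit_suite hash_test_suite = {
 *		.name = "<algorithm name>",
 *		.test_cases = hash_test_cases,
 *		.suite_init = hash_suite_init,
 *		.suite_exit = hash_suite_exit,
 *	};
 *	kunit_test_suite(hash_test_suite);
 */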