Path: lib/crypto/tests/hash-test-template.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Test cases for hash functions, including a benchmark. This is included by
 * KUnit test suites that want to use it. See sha512_kunit.c for an example.
 *
 * Copyright 2025 Google LLC
 */
#include <kunit/run-in-irq-context.h>
#include <kunit/test.h>
#include <linux/vmalloc.h>

/* test_buf is a guarded buffer, i.e. &test_buf[TEST_BUF_LEN] is not mapped. */
#define TEST_BUF_LEN 16384
static u8 *test_buf;

static u8 *orig_test_buf;

static u64 random_seed;

/*
 * This is a simple linear congruential generator. It is used only for testing,
 * which does not require cryptographically secure random numbers. A hard-coded
 * algorithm is used instead of <linux/prandom.h> so that it matches the
 * algorithm used by the test vector generation script. This allows the input
 * data in random test vectors to be concisely stored as just the seed.
 */
static u32 rand32(void)
{
	random_seed = (random_seed * 25214903917 + 11) & ((1ULL << 48) - 1);
	return random_seed >> 16;
}

static void rand_bytes(u8 *out, size_t len)
{
	for (size_t i = 0; i < len; i++)
		out[i] = rand32();
}

static void rand_bytes_seeded_from_len(u8 *out, size_t len)
{
	random_seed = len;
	rand_bytes(out, len);
}

static bool rand_bool(void)
{
	return rand32() % 2;
}

/* Generate a random length, preferring small lengths. */
static size_t rand_length(size_t max_len)
{
	size_t len;

	switch (rand32() % 3) {
	case 0:
		len = rand32() % 128;
		break;
	case 1:
		len = rand32() % 3072;
		break;
	default:
		len = rand32();
		break;
	}
	return len % (max_len + 1);
}

static size_t rand_offset(size_t max_offset)
{
	return min(rand32() % 128, max_offset);
}

static int hash_suite_init(struct kunit_suite *suite)
{
	/*
	 * Allocate the test buffer using vmalloc() with a page-aligned length
	 * so that it is immediately followed by a guard page. This allows
	 * buffer overreads to be detected, even in assembly code.
	 */
	size_t alloc_len = round_up(TEST_BUF_LEN, PAGE_SIZE);

	orig_test_buf = vmalloc(alloc_len);
	if (!orig_test_buf)
		return -ENOMEM;

	test_buf = orig_test_buf + alloc_len - TEST_BUF_LEN;
	return 0;
}

static void hash_suite_exit(struct kunit_suite *suite)
{
	vfree(orig_test_buf);
	orig_test_buf = NULL;
	test_buf = NULL;
}
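/*
 * The including suite must also provide the test vectors, typically via a
 * generated header. The following is a sketch of the assumed layout, with the
 * field names inferred from how the arrays are used below (the real
 * definitions live in the generated *-testvecs.h headers, not here):
 *
 *	static const struct {
 *		size_t data_len;
 *		u8 digest[HASH_SIZE];
 *	} hash_testvecs[] = { ... };
 *
 *	static const u8 hash_testvec_consolidated[HASH_SIZE] = { ... };
 *
 * Each vector stores only the data length and the expected digest; the input
 * data itself is regenerated with rand_bytes_seeded_from_len(), as described
 * above.
 */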
/*
 * Test the hash function against a list of test vectors.
 *
 * Note that it's only necessary to run each test vector in one way (e.g.,
 * one-shot instead of incremental), since consistency between different ways
 * of using the APIs is verified by other test cases.
 */
static void test_hash_test_vectors(struct kunit *test)
{
	for (size_t i = 0; i < ARRAY_SIZE(hash_testvecs); i++) {
		size_t data_len = hash_testvecs[i].data_len;
		u8 actual_hash[HASH_SIZE];

		KUNIT_ASSERT_LE(test, data_len, TEST_BUF_LEN);
		rand_bytes_seeded_from_len(test_buf, data_len);

		HASH(test_buf, data_len, actual_hash);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, actual_hash, hash_testvecs[i].digest, HASH_SIZE,
			"Wrong result with test vector %zu; data_len=%zu", i,
			data_len);
	}
}

/*
 * Test that the hash function produces correct results for *every* length up
 * to 4096 bytes. To do this, generate seeded random data, then calculate a
 * hash value for each length 0..4096, then hash the hash values. Verify just
 * the final hash value, which should match only when all hash values were
 * correct.
 */
static void test_hash_all_lens_up_to_4096(struct kunit *test)
{
	struct HASH_CTX ctx;
	u8 hash[HASH_SIZE];

	static_assert(TEST_BUF_LEN >= 4096);
	rand_bytes_seeded_from_len(test_buf, 4096);
	HASH_INIT(&ctx);
	for (size_t len = 0; len <= 4096; len++) {
		HASH(test_buf, len, hash);
		HASH_UPDATE(&ctx, hash, HASH_SIZE);
	}
	HASH_FINAL(&ctx, hash);
	KUNIT_ASSERT_MEMEQ(test, hash, hash_testvec_consolidated, HASH_SIZE);
}

/*
 * Test that the hash function produces the same result with a one-shot
 * computation as it does with an incremental computation.
 */
static void test_hash_incremental_updates(struct kunit *test)
{
	for (int i = 0; i < 1000; i++) {
		size_t total_len, offset;
		struct HASH_CTX ctx;
		u8 hash1[HASH_SIZE];
		u8 hash2[HASH_SIZE];
		size_t num_parts = 0;
		size_t remaining_len, cur_offset;

		total_len = rand_length(TEST_BUF_LEN);
		offset = rand_offset(TEST_BUF_LEN - total_len);
		rand_bytes(&test_buf[offset], total_len);

		/* Compute the hash value in one shot. */
		HASH(&test_buf[offset], total_len, hash1);

		/*
		 * Compute the hash value incrementally, using a randomly
		 * selected sequence of update lengths that sum to total_len.
		 */
		HASH_INIT(&ctx);
		remaining_len = total_len;
		cur_offset = offset;
		while (rand_bool()) {
			size_t part_len = rand_length(remaining_len);

			HASH_UPDATE(&ctx, &test_buf[cur_offset], part_len);
			num_parts++;
			cur_offset += part_len;
			remaining_len -= part_len;
		}
		if (remaining_len != 0 || rand_bool()) {
			HASH_UPDATE(&ctx, &test_buf[cur_offset], remaining_len);
			num_parts++;
		}
		HASH_FINAL(&ctx, hash2);

		/* Verify that the two hash values are the same. */
		KUNIT_ASSERT_MEMEQ_MSG(
			test, hash1, hash2, HASH_SIZE,
			"Incremental test failed with total_len=%zu num_parts=%zu offset=%zu",
			total_len, num_parts, offset);
	}
}
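/*
 * Memory layout relied on by the overrun tests below, as set up by
 * hash_suite_init() (a sketch, not to scale):
 *
 *	... vmalloc area ... | test_buf[0 .. TEST_BUF_LEN - 1] | guard page
 *	                                                        ^
 *	                                     &test_buf[TEST_BUF_LEN], unmapped
 *
 * Placing the data, digest, or context so that it ends exactly at the guard
 * page makes any overread or overwrite fault immediately, even when it occurs
 * in assembly code.
 */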
/*
 * Test that the hash function does not overrun any buffers. Uses a guard page
 * to catch buffer overruns even if they occur in assembly code.
 */
static void test_hash_buffer_overruns(struct kunit *test)
{
	const size_t max_tested_len = TEST_BUF_LEN - sizeof(struct HASH_CTX);
	void *const buf_end = &test_buf[TEST_BUF_LEN];
	struct HASH_CTX *guarded_ctx = buf_end - sizeof(*guarded_ctx);

	rand_bytes(test_buf, TEST_BUF_LEN);

	for (int i = 0; i < 100; i++) {
		size_t len = rand_length(max_tested_len);
		struct HASH_CTX ctx;
		u8 hash[HASH_SIZE];

		/* Check for overruns of the data buffer. */
		HASH(buf_end - len, len, hash);
		HASH_INIT(&ctx);
		HASH_UPDATE(&ctx, buf_end - len, len);
		HASH_FINAL(&ctx, hash);

		/* Check for overruns of the hash value buffer. */
		HASH(test_buf, len, buf_end - HASH_SIZE);
		HASH_INIT(&ctx);
		HASH_UPDATE(&ctx, test_buf, len);
		HASH_FINAL(&ctx, buf_end - HASH_SIZE);

		/* Check for overruns of the hash context. */
		HASH_INIT(guarded_ctx);
		HASH_UPDATE(guarded_ctx, test_buf, len);
		HASH_FINAL(guarded_ctx, hash);
	}
}

/*
 * Test that the caller is permitted to alias the output digest and source data
 * buffer, and also modify the source data buffer after it has been used.
 */
static void test_hash_overlaps(struct kunit *test)
{
	const size_t max_tested_len = TEST_BUF_LEN - HASH_SIZE;
	struct HASH_CTX ctx;
	u8 hash[HASH_SIZE];

	rand_bytes(test_buf, TEST_BUF_LEN);

	for (int i = 0; i < 100; i++) {
		size_t len = rand_length(max_tested_len);
		size_t offset = HASH_SIZE + rand_offset(max_tested_len - len);
		bool left_end = rand_bool();
		u8 *ovl_hash = left_end ? &test_buf[offset] :
					  &test_buf[offset + len - HASH_SIZE];

		HASH(&test_buf[offset], len, hash);
		HASH(&test_buf[offset], len, ovl_hash);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, hash, ovl_hash, HASH_SIZE,
			"Overlap test 1 failed with len=%zu offset=%zu left_end=%d",
			len, offset, left_end);

		/* Repeat the above test, but this time use init+update+final. */
		HASH(&test_buf[offset], len, hash);
		HASH_INIT(&ctx);
		HASH_UPDATE(&ctx, &test_buf[offset], len);
		HASH_FINAL(&ctx, ovl_hash);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, hash, ovl_hash, HASH_SIZE,
			"Overlap test 2 failed with len=%zu offset=%zu left_end=%d",
			len, offset, left_end);

		/* Test modifying the source data after it was used. */
		HASH(&test_buf[offset], len, hash);
		HASH_INIT(&ctx);
		HASH_UPDATE(&ctx, &test_buf[offset], len);
		rand_bytes(&test_buf[offset], len);
		HASH_FINAL(&ctx, ovl_hash);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, hash, ovl_hash, HASH_SIZE,
			"Overlap test 3 failed with len=%zu offset=%zu left_end=%d",
			len, offset, left_end);
	}
}

/*
 * Test that if the same data is hashed at different alignments in memory, the
 * results are the same.
 */
static void test_hash_alignment_consistency(struct kunit *test)
{
	u8 hash1[128 + HASH_SIZE];
	u8 hash2[128 + HASH_SIZE];

	for (int i = 0; i < 100; i++) {
		size_t len = rand_length(TEST_BUF_LEN);
		size_t data_offs1 = rand_offset(TEST_BUF_LEN - len);
		size_t data_offs2 = rand_offset(TEST_BUF_LEN - len);
		size_t hash_offs1 = rand_offset(128);
		size_t hash_offs2 = rand_offset(128);

		rand_bytes(&test_buf[data_offs1], len);
		HASH(&test_buf[data_offs1], len, &hash1[hash_offs1]);
		memmove(&test_buf[data_offs2], &test_buf[data_offs1], len);
		HASH(&test_buf[data_offs2], len, &hash2[hash_offs2]);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, &hash1[hash_offs1], &hash2[hash_offs2], HASH_SIZE,
			"Alignment consistency test failed with len=%zu data_offs=(%zu,%zu) hash_offs=(%zu,%zu)",
			len, data_offs1, data_offs2, hash_offs1, hash_offs2);
	}
}

/* Test that HASH_FINAL zeroizes the context. */
static void test_hash_ctx_zeroization(struct kunit *test)
{
	static const u8 zeroes[sizeof(struct HASH_CTX)];
	struct HASH_CTX ctx;

	rand_bytes(test_buf, 128);
	HASH_INIT(&ctx);
	HASH_UPDATE(&ctx, test_buf, 128);
	HASH_FINAL(&ctx, test_buf);
	KUNIT_ASSERT_MEMEQ_MSG(test, &ctx, zeroes, sizeof(ctx),
			       "Hash context was not zeroized by finalization");
}

#define IRQ_TEST_DATA_LEN 256
#define IRQ_TEST_NUM_BUFFERS 3 /* matches max concurrency level */

struct hash_irq_test1_state {
	u8 expected_hashes[IRQ_TEST_NUM_BUFFERS][HASH_SIZE];
	atomic_t seqno;
};
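/*
 * Contract assumed here for kunit_run_irq_test() (declared in
 * <kunit/run-in-irq-context.h>): it repeatedly invokes the given callback in
 * task, softirq, and hardirq context, up to the given number of iterations,
 * passing through the opaque state pointer, and the test fails if the
 * callback ever returns false. Up to IRQ_TEST_NUM_BUFFERS invocations may be
 * in flight concurrently, which is why that many messages (and, further
 * below, that many hash contexts) are used.
 */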
/*
 * Compute the hash of one of the test messages and verify that it matches the
 * expected hash from @state->expected_hashes. To increase the chance of
 * detecting problems, cycle through multiple messages.
 */
static bool hash_irq_test1_func(void *state_)
{
	struct hash_irq_test1_state *state = state_;
	u32 i = (u32)atomic_inc_return(&state->seqno) % IRQ_TEST_NUM_BUFFERS;
	u8 actual_hash[HASH_SIZE];

	HASH(&test_buf[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN, actual_hash);
	return memcmp(actual_hash, state->expected_hashes[i], HASH_SIZE) == 0;
}

/*
 * Test that if hashes are computed in task, softirq, and hardirq context
 * concurrently, then all results are as expected.
 */
static void test_hash_interrupt_context_1(struct kunit *test)
{
	struct hash_irq_test1_state state = {};

	/* Prepare some test messages and compute the expected hash of each. */
	rand_bytes(test_buf, IRQ_TEST_NUM_BUFFERS * IRQ_TEST_DATA_LEN);
	for (int i = 0; i < IRQ_TEST_NUM_BUFFERS; i++)
		HASH(&test_buf[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN,
		     state.expected_hashes[i]);

	kunit_run_irq_test(test, hash_irq_test1_func, 100000, &state);
}

struct hash_irq_test2_hash_ctx {
	struct HASH_CTX hash_ctx;
	atomic_t in_use;
	int offset;
	int step;
};

struct hash_irq_test2_state {
	struct hash_irq_test2_hash_ctx ctxs[IRQ_TEST_NUM_BUFFERS];
	u8 expected_hash[HASH_SIZE];
	u16 update_lens[32];
	int num_steps;
};
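/*
 * Each context in @ctxs steps through one message as a small state machine:
 * step 0 does HASH_INIT, steps 1 through num_steps - 2 each do one
 * HASH_UPDATE of update_lens[step - 1] bytes, and the last step does
 * HASH_FINAL and verifies the result. Since the callback below grabs
 * whichever context is free, consecutive steps of the same computation can
 * run in different interrupt contexts.
 */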
static bool hash_irq_test2_func(void *state_)
{
	struct hash_irq_test2_state *state = state_;
	struct hash_irq_test2_hash_ctx *ctx;
	bool ret = true;

	for (ctx = &state->ctxs[0]; ctx < &state->ctxs[ARRAY_SIZE(state->ctxs)];
	     ctx++) {
		if (atomic_cmpxchg(&ctx->in_use, 0, 1) == 0)
			break;
	}
	if (WARN_ON_ONCE(ctx == &state->ctxs[ARRAY_SIZE(state->ctxs)])) {
		/*
		 * This should never happen, as the number of contexts is equal
		 * to the maximum concurrency level of kunit_run_irq_test().
		 */
		return false;
	}

	if (ctx->step == 0) {
		/* Init step */
		HASH_INIT(&ctx->hash_ctx);
		ctx->offset = 0;
		ctx->step++;
	} else if (ctx->step < state->num_steps - 1) {
		/* Update step */
		HASH_UPDATE(&ctx->hash_ctx, &test_buf[ctx->offset],
			    state->update_lens[ctx->step - 1]);
		ctx->offset += state->update_lens[ctx->step - 1];
		ctx->step++;
	} else {
		/* Final step */
		u8 actual_hash[HASH_SIZE];

		if (WARN_ON_ONCE(ctx->offset != TEST_BUF_LEN))
			ret = false;
		HASH_FINAL(&ctx->hash_ctx, actual_hash);
		if (memcmp(actual_hash, state->expected_hash, HASH_SIZE) != 0)
			ret = false;
		ctx->step = 0;
	}
	atomic_set_release(&ctx->in_use, 0);
	return ret;
}

/*
 * Test that if hashes are computed in task, softirq, and hardirq context
 * concurrently, *including doing different parts of the same incremental
 * computation in different contexts*, then all results are as expected.
 * Besides detecting bugs similar to those that test_hash_interrupt_context_1
 * can detect, this test case can also detect bugs where hash function
 * implementations don't correctly handle these mixed incremental computations.
 */
static void test_hash_interrupt_context_2(struct kunit *test)
{
	struct hash_irq_test2_state *state;
	int remaining = TEST_BUF_LEN;

	state = kunit_kzalloc(test, sizeof(*state), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, state);

	rand_bytes(test_buf, TEST_BUF_LEN);
	HASH(test_buf, TEST_BUF_LEN, state->expected_hash);

	/*
	 * Generate a list of update lengths to use. Ensure that it contains
	 * multiple entries but is limited to a maximum length.
	 */
	static_assert(TEST_BUF_LEN / 4096 > 1);
	for (state->num_steps = 0;
	     state->num_steps < ARRAY_SIZE(state->update_lens) - 1 && remaining;
	     state->num_steps++) {
		state->update_lens[state->num_steps] =
			rand_length(min(remaining, 4096));
		remaining -= state->update_lens[state->num_steps];
	}
	if (remaining)
		state->update_lens[state->num_steps++] = remaining;
	state->num_steps += 2; /* for init and final */

	kunit_run_irq_test(test, hash_irq_test2_func, 250000, state);
}

#define UNKEYED_HASH_KUNIT_CASES				\
	KUNIT_CASE(test_hash_test_vectors),			\
	KUNIT_CASE(test_hash_all_lens_up_to_4096),		\
	KUNIT_CASE(test_hash_incremental_updates),		\
	KUNIT_CASE(test_hash_buffer_overruns),			\
	KUNIT_CASE(test_hash_overlaps),				\
	KUNIT_CASE(test_hash_alignment_consistency),		\
	KUNIT_CASE(test_hash_ctx_zeroization),			\
	KUNIT_CASE(test_hash_interrupt_context_1),		\
	KUNIT_CASE(test_hash_interrupt_context_2)
/* benchmark_hash is omitted so that the suites can put it last. */

#ifdef HMAC
/*
 * Test the corresponding HMAC variant.
 *
 * This test case is fairly short, since HMAC is just a simple C wrapper around
 * the underlying unkeyed hash function, which is already well-tested by the
 * other test cases. It's not useful to test things like data alignment or
 * interrupt context again for HMAC, nor to have a long list of test vectors.
 *
 * Thus, just do a single consolidated test, which covers all data lengths up
 * to 4096 bytes and all key lengths up to 292 bytes. For each data length,
 * select a key length, generate the inputs from a seed, and compute the HMAC
 * value. Concatenate all these HMAC values together, and compute the HMAC of
 * that. Verify that value. If this fails, then the HMAC implementation is
 * wrong. This won't show which specific input failed, but that should be
 * fine. Any failure would likely be non-input-specific or also show in the
 * unkeyed tests.
 */
static void test_hmac(struct kunit *test)
{
	static const u8 zeroes[sizeof(struct HMAC_CTX)];
	u8 *raw_key;
	struct HMAC_KEY key;
	struct HMAC_CTX ctx;
	u8 mac[HASH_SIZE];
	u8 mac2[HASH_SIZE];

	static_assert(TEST_BUF_LEN >= 4096 + 293);
	rand_bytes_seeded_from_len(test_buf, 4096);
	raw_key = &test_buf[4096];

	rand_bytes_seeded_from_len(raw_key, 32);
	HMAC_PREPAREKEY(&key, raw_key, 32);
	HMAC_INIT(&ctx, &key);
	for (size_t data_len = 0; data_len <= 4096; data_len++) {
		/*
		 * Cycle through key lengths as well. Somewhat arbitrarily go
		 * up to 293, which is somewhat larger than the largest hash
		 * block size (which is the size at which the key starts being
		 * hashed down to one block); going higher would not be useful.
		 * To reduce correlation with data_len, use a prime number here.
		 */
		size_t key_len = data_len % 293;

		HMAC_UPDATE(&ctx, test_buf, data_len);

		rand_bytes_seeded_from_len(raw_key, key_len);
		HMAC_USINGRAWKEY(raw_key, key_len, test_buf, data_len, mac);
		HMAC_UPDATE(&ctx, mac, HASH_SIZE);

		/* Verify that HMAC() is consistent with HMAC_USINGRAWKEY(). */
		HMAC_PREPAREKEY(&key, raw_key, key_len);
		HMAC(&key, test_buf, data_len, mac2);
		KUNIT_ASSERT_MEMEQ_MSG(
			test, mac, mac2, HASH_SIZE,
			"HMAC gave different results with raw and prepared keys");
	}
	HMAC_FINAL(&ctx, mac);
	KUNIT_EXPECT_MEMEQ_MSG(test, mac, hmac_testvec_consolidated, HASH_SIZE,
			       "HMAC gave wrong result");
	KUNIT_EXPECT_MEMEQ_MSG(test, &ctx, zeroes, sizeof(ctx),
			       "HMAC context was not zeroized by finalization");
}
#define HASH_KUNIT_CASES UNKEYED_HASH_KUNIT_CASES, KUNIT_CASE(test_hmac)
#else
#define HASH_KUNIT_CASES UNKEYED_HASH_KUNIT_CASES
#endif
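/*
 * A worked example of the throughput computation below, using made-up round
 * numbers rather than measured ones: hashing len = 16384 bytes for
 * num_iters = 1000 iterations in t = 16384000 ns gives
 * len * num_iters * 1000 / t = 1000 MB/s, since (bytes * 1000) / ns is bytes
 * per microsecond, i.e. units of 10^6 bytes per second.
 */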
/* Benchmark the hash function on various data lengths. */
static void benchmark_hash(struct kunit *test)
{
	static const size_t lens_to_test[] = {
		1,   16,  64,   127,  128,  200,   256,
		511, 512, 1024, 3173, 4096, 16384,
	};
	u8 hash[HASH_SIZE];

	if (!IS_ENABLED(CONFIG_CRYPTO_LIB_BENCHMARK))
		kunit_skip(test, "not enabled");

	/* Warm-up */
	for (size_t i = 0; i < 10000000; i += TEST_BUF_LEN)
		HASH(test_buf, TEST_BUF_LEN, hash);

	for (size_t i = 0; i < ARRAY_SIZE(lens_to_test); i++) {
		size_t len = lens_to_test[i];
		/* The '+ 128' tries to account for per-message overhead. */
		size_t num_iters = 10000000 / (len + 128);
		u64 t;

		KUNIT_ASSERT_LE(test, len, TEST_BUF_LEN);
		preempt_disable();
		t = ktime_get_ns();
		for (size_t j = 0; j < num_iters; j++)
			HASH(test_buf, len, hash);
		t = ktime_get_ns() - t;
		preempt_enable();
		kunit_info(test, "len=%zu: %llu MB/s", len,
			   div64_u64((u64)len * num_iters * 1000, t ?: 1));
	}
}
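/*
 * Example of an including test suite, modeled on the SHA-512 suite mentioned
 * at the top of this file. This is a sketch only: the exact macro and symbol
 * names that sha512_kunit.c defines are assumptions here, not copied from it.
 *
 *	// sha512_kunit.c (hypothetical sketch)
 *	#include <crypto/sha2.h>
 *	#define HASH_SIZE	SHA512_DIGEST_SIZE
 *	#define HASH_CTX	sha512_ctx
 *	#define HASH_INIT	sha512_init
 *	#define HASH_UPDATE	sha512_update
 *	#define HASH_FINAL	sha512_final
 *	#define HASH		sha512
 *	#include "sha512-testvecs.h"
 *	#include "hash-test-template.h"
 *
 *	static struct kunit_case hash_test_cases[] = {
 *		HASH_KUNIT_CASES,
 *		KUNIT_CASE(benchmark_hash),
 *		{},
 *	};
 *
 *	static struct kunit_suite hash_test_suite = {
 *		.name = "sha512",
 *		.test_cases = hash_test_cases,
 *		.suite_init = hash_suite_init,
 *		.suite_exit = hash_suite_exit,
 *	};
 *	kunit_test_suite(hash_test_suite);
 */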