Merge tag 'crc-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux

Pull CRC updates from Eric Biggers:
 "Update crc_kunit to test the CRC functions in softirq and hardirq
  contexts, similar to what the lib/crypto/ KUnit tests do. Move the
  helper function needed to do this into a common header.

  This is useful mainly to test fallback code paths for when
  FPU/SIMD/vector registers are unusable"

* tag 'crc-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux:
  Documentation/staging: Fix typo and incorrect citation in crc32.rst
  lib/crc: Drop inline from all *_mod_init_arch() functions
  lib/crc: Use underlying functions instead of crypto_simd_usable()
  lib/crc: crc_kunit: Test CRC computation in interrupt contexts
  kunit, lib/crypto: Move run_irq_test() to common header
Linus Torvalds
2025-09-29 15:36:42 -07:00
17 changed files with 219 additions and 165 deletions

View File

@@ -34,7 +34,7 @@ do it in the right order, matching the endianness.
Just like with ordinary division, you proceed one digit (bit) at a time.
Each step of the division you take one more digit (bit) of the dividend
and append it to the current remainder. Then you figure out the
-appropriate multiple of the divisor to subtract to being the remainder
+appropriate multiple of the divisor to subtract to bring the remainder
back into range. In binary, this is easy - it has to be either 0 or 1,
and to make the XOR cancel, it's just a copy of bit 32 of the remainder.
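
As an illustration of the bit-at-a-time long division described above (a sketch, not code from this commit), the reflected CRC-32 form used by the kernel can be written as follows; 0xEDB88320 is the bit-reversed CRC-32 polynomial, and callers conventionally start from an all-ones value and invert the final result:

static u32 crc32_le_bit_at_a_time(u32 crc, const u8 *p, size_t len)
{
	while (len--) {
		crc ^= *p++;	/* take in the next 8 message bits */
		for (int i = 0; i < 8; i++)
			/* if the low bit is set, XORing the divisor cancels it */
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320 : 0);
	}
	return crc;
}
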
@@ -116,7 +116,7 @@ for any fractional bytes at the end.
To reduce the number of conditional branches, software commonly uses
the byte-at-a-time table method, popularized by Dilip V. Sarwate,
"Computation of Cyclic Redundancy Checks via Table Look-Up", Comm. ACM
-v.31 no.8 (August 1998) p. 1008-1013.
+v.31 no.8 (August 1988) p. 1008-1013.
Here, rather than just shifting one bit of the remainder to decide
on the correct multiple to subtract, we can shift a byte at a time.
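
A sketch of that table method (again illustrative, not from this commit): crc32_le_table[] is a hypothetical 256-entry array holding the CRC of each possible byte value, precomputable with the bit-at-a-time loop above, so each iteration retires a whole byte with one lookup instead of eight conditional XORs:

static u32 crc32_le_table[256];	/* precomputed, e.g. with the loop above */

static u32 crc32_le_byte_at_a_time(u32 crc, const u8 *p, size_t len)
{
	while (len--)
		crc = (crc >> 8) ^ crc32_le_table[(crc ^ *p++) & 0xff];
	return crc;
}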

View File

@@ -0,0 +1,129 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Helper function for testing code in interrupt contexts
*
* Copyright 2025 Google LLC
*/
#ifndef _KUNIT_RUN_IN_IRQ_CONTEXT_H
#define _KUNIT_RUN_IN_IRQ_CONTEXT_H
#include <kunit/test.h>
#include <linux/timekeeping.h>
#include <linux/hrtimer.h>
#include <linux/workqueue.h>
#define KUNIT_IRQ_TEST_HRTIMER_INTERVAL us_to_ktime(5)
struct kunit_irq_test_state {
bool (*func)(void *test_specific_state);
void *test_specific_state;
bool task_func_reported_failure;
bool hardirq_func_reported_failure;
bool softirq_func_reported_failure;
unsigned long hardirq_func_calls;
unsigned long softirq_func_calls;
struct hrtimer timer;
struct work_struct bh_work;
};
static enum hrtimer_restart kunit_irq_test_timer_func(struct hrtimer *timer)
{
struct kunit_irq_test_state *state =
container_of(timer, typeof(*state), timer);
WARN_ON_ONCE(!in_hardirq());
state->hardirq_func_calls++;
if (!state->func(state->test_specific_state))
state->hardirq_func_reported_failure = true;
hrtimer_forward_now(&state->timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL);
queue_work(system_bh_wq, &state->bh_work);
return HRTIMER_RESTART;
}
static void kunit_irq_test_bh_work_func(struct work_struct *work)
{
struct kunit_irq_test_state *state =
container_of(work, typeof(*state), bh_work);
WARN_ON_ONCE(!in_serving_softirq());
state->softirq_func_calls++;
if (!state->func(state->test_specific_state))
state->softirq_func_reported_failure = true;
}
/*
* Helper function which repeatedly runs the given @func in task, softirq, and
* hardirq context concurrently, and reports a failure to KUnit if any
* invocation of @func in any context returns false. @func is passed
* @test_specific_state as its argument. At most 3 invocations of @func will
* run concurrently: one in each of task, softirq, and hardirq context.
*
* The main purpose of this interrupt context testing is to validate fallback
* code paths that run in contexts where the normal code path cannot be used,
* typically due to the FPU or vector registers already being in-use in kernel
* mode. These code paths aren't covered when the test code is executed only by
* the KUnit test runner thread in task context. The reason for the concurrency
* is that merely using hardirq context is not sufficient to reach a fallback
* code path on some architectures; the hardirq actually has to occur while the
* FPU or vector unit was already in-use in kernel mode.
*
* Another purpose of this testing is to detect issues with the architecture's
* irq_fpu_usable() and kernel_fpu_begin/end() or equivalent functions,
* especially in softirq context when the softirq may have interrupted a task
* already using kernel-mode FPU or vector (if the arch didn't prevent that).
* Crypto functions are often executed in softirqs, so this is important.
*/
static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *),
int max_iterations,
void *test_specific_state)
{
struct kunit_irq_test_state state = {
.func = func,
.test_specific_state = test_specific_state,
};
unsigned long end_jiffies;
/*
* Set up a hrtimer (the way we access hardirq context) and a work
* struct for the BH workqueue (the way we access softirq context).
*/
hrtimer_setup_on_stack(&state.timer, kunit_irq_test_timer_func,
CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
INIT_WORK_ONSTACK(&state.bh_work, kunit_irq_test_bh_work_func);
/* Run for up to max_iterations or 1 second, whichever comes first. */
end_jiffies = jiffies + HZ;
hrtimer_start(&state.timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL,
HRTIMER_MODE_REL_HARD);
for (int i = 0; i < max_iterations && !time_after(jiffies, end_jiffies);
i++) {
if (!func(test_specific_state))
state.task_func_reported_failure = true;
}
/* Cancel the timer and work. */
hrtimer_cancel(&state.timer);
flush_work(&state.bh_work);
/* Sanity check: the timer and BH functions should have been run. */
KUNIT_EXPECT_GT_MSG(test, state.hardirq_func_calls, 0,
"Timer function was not called");
KUNIT_EXPECT_GT_MSG(test, state.softirq_func_calls, 0,
"BH work function was not called");
/* Check for incorrect hash values reported from any context. */
KUNIT_EXPECT_FALSE_MSG(
test, state.task_func_reported_failure,
"Incorrect hash values reported from task context");
KUNIT_EXPECT_FALSE_MSG(
test, state.hardirq_func_reported_failure,
"Incorrect hash values reported from hardirq context");
KUNIT_EXPECT_FALSE_MSG(
test, state.softirq_func_reported_failure,
"Incorrect hash values reported from softirq context");
}
#endif /* _KUNIT_RUN_IN_IRQ_CONTEXT_H */
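
For orientation, a minimal usage sketch of this helper (hypothetical test names, not code from this commit), checking that crc32_le() from <linux/crc32.h> gives the same result in all three contexts:

static u8 irq_test_buf[512];	/* hypothetical test buffer */

static bool crc32_irq_test_func(void *state_)
{
	const u32 *expected = state_;

	/* Runs concurrently in task, softirq, and hardirq context. */
	return crc32_le(~0, irq_test_buf, sizeof(irq_test_buf)) == *expected;
}

static void crc32_interrupt_context_test(struct kunit *test)
{
	u32 expected = crc32_le(~0, irq_test_buf, sizeof(irq_test_buf));

	kunit_run_irq_test(test, crc32_irq_test_func, 100000, &expected);
}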

View File

@@ -5,8 +5,6 @@
* Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
*/
-#include <crypto/internal/simd.h>
#include <asm/neon.h>
#include <asm/simd.h>
@@ -23,7 +21,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
{
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE) {
if (static_branch_likely(&have_pmull)) {
-if (crypto_simd_usable()) {
+if (likely(may_use_simd())) {
kernel_neon_begin();
crc = crc_t10dif_pmull64(crc, data, length);
kernel_neon_end();
@@ -31,7 +29,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
}
} else if (length > CRC_T10DIF_PMULL_CHUNK_SIZE &&
static_branch_likely(&have_neon) &&
-crypto_simd_usable()) {
+likely(may_use_simd())) {
u8 buf[16] __aligned(16);
kernel_neon_begin();
@@ -45,7 +43,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
}
#define crc_t10dif_mod_init_arch crc_t10dif_mod_init_arch
-static inline void crc_t10dif_mod_init_arch(void)
+static void crc_t10dif_mod_init_arch(void)
{
if (elf_hwcap & HWCAP_NEON) {
static_branch_enable(&have_neon);
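
The shape of that conversion, sketched once rather than per architecture (have_accel, crc_accel(), and crc_generic() are stand-ins; the NEON calls assume an arm-style port, while x86 pairs irq_fpu_usable() with kernel_fpu_begin()/kernel_fpu_end()): may_use_simd() decides, in the current context, whether the SIMD path may be taken, and everything else falls through to the scalar code.

static DEFINE_STATIC_KEY_FALSE(have_accel);	/* set at init if CPU support found */

static inline u32 crc_example_arch(u32 crc, const u8 *p, size_t len)
{
	if (static_branch_likely(&have_accel) && likely(may_use_simd())) {
		kernel_neon_begin();	/* claim the vector unit */
		crc = crc_accel(crc, p, len);	/* hypothetical SIMD helper */
		kernel_neon_end();
		return crc;
	}
	return crc_generic(crc, p, len);	/* hypothetical scalar fallback */
}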

View File

@@ -7,8 +7,6 @@
#include <linux/cpufeature.h>
-#include <crypto/internal/simd.h>
#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
@@ -34,7 +32,7 @@ static inline u32 crc32_le_scalar(u32 crc, const u8 *p, size_t len)
static inline u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
{
if (len >= PMULL_MIN_LEN + 15 &&
-static_branch_likely(&have_pmull) && crypto_simd_usable()) {
+static_branch_likely(&have_pmull) && likely(may_use_simd())) {
size_t n = -(uintptr_t)p & 15;
/* align p to 16-byte boundary */
@@ -63,7 +61,7 @@ static inline u32 crc32c_scalar(u32 crc, const u8 *p, size_t len)
static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
{
if (len >= PMULL_MIN_LEN + 15 &&
-static_branch_likely(&have_pmull) && crypto_simd_usable()) {
+static_branch_likely(&have_pmull) && likely(may_use_simd())) {
size_t n = -(uintptr_t)p & 15;
/* align p to 16-byte boundary */
@@ -85,7 +83,7 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
#define crc32_be_arch crc32_be_base /* not implemented on this arch */
#define crc32_mod_init_arch crc32_mod_init_arch
-static inline void crc32_mod_init_arch(void)
+static void crc32_mod_init_arch(void)
{
if (elf_hwcap2 & HWCAP2_CRC32)
static_branch_enable(&have_crc32);

View File

@@ -7,8 +7,6 @@
#include <linux/cpufeature.h>
-#include <crypto/internal/simd.h>
#include <asm/neon.h>
#include <asm/simd.h>
@@ -25,7 +23,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
{
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE) {
if (static_branch_likely(&have_pmull)) {
-if (crypto_simd_usable()) {
+if (likely(may_use_simd())) {
kernel_neon_begin();
crc = crc_t10dif_pmull_p64(crc, data, length);
kernel_neon_end();
@@ -33,7 +31,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
}
} else if (length > CRC_T10DIF_PMULL_CHUNK_SIZE &&
static_branch_likely(&have_asimd) &&
-crypto_simd_usable()) {
+likely(may_use_simd())) {
u8 buf[16];
kernel_neon_begin();
@@ -47,7 +45,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
}
#define crc_t10dif_mod_init_arch crc_t10dif_mod_init_arch
-static inline void crc_t10dif_mod_init_arch(void)
+static void crc_t10dif_mod_init_arch(void)
{
if (cpu_have_named_feature(ASIMD)) {
static_branch_enable(&have_asimd);

View File

@@ -5,8 +5,6 @@
#include <asm/neon.h>
#include <asm/simd.h>
-#include <crypto/internal/simd.h>
// The minimum input length to consider the 4-way interleaved code path
static const size_t min_len = 1024;
@@ -23,7 +21,8 @@ static inline u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
return crc32_le_base(crc, p, len);
-if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
+if (len >= min_len && cpu_have_named_feature(PMULL) &&
+likely(may_use_simd())) {
kernel_neon_begin();
crc = crc32_le_arm64_4way(crc, p, len);
kernel_neon_end();
@@ -43,7 +42,8 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
return crc32c_base(crc, p, len);
-if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
+if (len >= min_len && cpu_have_named_feature(PMULL) &&
+likely(may_use_simd())) {
kernel_neon_begin();
crc = crc32c_le_arm64_4way(crc, p, len);
kernel_neon_end();
@@ -63,7 +63,8 @@ static inline u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
return crc32_be_base(crc, p, len);
-if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
+if (len >= min_len && cpu_have_named_feature(PMULL) &&
+likely(may_use_simd())) {
kernel_neon_begin();
crc = crc32_be_arm64_4way(crc, p, len);
kernel_neon_end();

View File

@@ -101,7 +101,7 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
#define crc32_be_arch crc32_be_base /* not implemented on this arch */
#define crc32_mod_init_arch crc32_mod_init_arch
-static inline void crc32_mod_init_arch(void)
+static void crc32_mod_init_arch(void)
{
if (cpu_has_crc32)
static_branch_enable(&have_crc32);

View File

@@ -148,7 +148,7 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
#define crc32_be_arch crc32_be_base /* not implemented on this arch */
#define crc32_mod_init_arch crc32_mod_init_arch
-static inline void crc32_mod_init_arch(void)
+static void crc32_mod_init_arch(void)
{
if (cpu_have_feature(cpu_feature(MIPS_CRC32)))
static_branch_enable(&have_crc32);

View File

@@ -6,8 +6,8 @@
* [based on crc32c-vpmsum_glue.c]
*/
+#include <asm/simd.h>
#include <asm/switch_to.h>
-#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>
#include <linux/jump_label.h>
#include <linux/preempt.h>
@@ -29,7 +29,8 @@ static inline u16 crc_t10dif_arch(u16 crci, const u8 *p, size_t len)
u32 crc = crci;
if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) ||
-!static_branch_likely(&have_vec_crypto) || !crypto_simd_usable())
+!static_branch_likely(&have_vec_crypto) ||
+unlikely(!may_use_simd()))
return crc_t10dif_generic(crc, p, len);
if ((unsigned long)p & VMX_ALIGN_MASK) {
@@ -61,7 +62,7 @@ static inline u16 crc_t10dif_arch(u16 crci, const u8 *p, size_t len)
}
#define crc_t10dif_mod_init_arch crc_t10dif_mod_init_arch
-static inline void crc_t10dif_mod_init_arch(void)
+static void crc_t10dif_mod_init_arch(void)
{
if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO))

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <asm/simd.h>
#include <asm/switch_to.h>
-#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>
#include <linux/jump_label.h>
#include <linux/preempt.h>
@@ -24,7 +24,8 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
unsigned int tail;
if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) ||
-!static_branch_likely(&have_vec_crypto) || !crypto_simd_usable())
+!static_branch_likely(&have_vec_crypto) ||
+unlikely(!may_use_simd()))
return crc32c_base(crc, p, len);
if ((unsigned long)p & VMX_ALIGN_MASK) {
@@ -54,7 +55,7 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
}
#define crc32_mod_init_arch crc32_mod_init_arch
-static inline void crc32_mod_init_arch(void)
+static void crc32_mod_init_arch(void)
{
if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO))

View File

@@ -44,7 +44,7 @@ static inline u32 crc32c_arch(u32 crc, const u8 *data, size_t len)
}
#define crc32_mod_init_arch crc32_mod_init_arch
-static inline void crc32_mod_init_arch(void)
+static void crc32_mod_init_arch(void)
{
unsigned long cfr;

View File

@@ -6,6 +6,7 @@
*
* Author: Eric Biggers <ebiggers@google.com>
*/
+#include <kunit/run-in-irq-context.h>
#include <kunit/test.h>
#include <linux/crc7.h>
#include <linux/crc16.h>
@@ -141,6 +142,54 @@ static size_t generate_random_length(size_t max_length)
return len % (max_length + 1);
}
#define IRQ_TEST_DATA_LEN 512
#define IRQ_TEST_NUM_BUFFERS 3 /* matches max concurrency level */
struct crc_irq_test_state {
const struct crc_variant *v;
u64 initial_crc;
u64 expected_crcs[IRQ_TEST_NUM_BUFFERS];
atomic_t seqno;
};
/*
* Compute the CRC of one of the test messages and verify that it matches the
* expected CRC from @state->expected_crcs. To increase the chance of detecting
* problems, cycle through multiple messages.
*/
static bool crc_irq_test_func(void *state_)
{
struct crc_irq_test_state *state = state_;
const struct crc_variant *v = state->v;
u32 i = (u32)atomic_inc_return(&state->seqno) % IRQ_TEST_NUM_BUFFERS;
u64 actual_crc = v->func(state->initial_crc,
&test_buffer[i * IRQ_TEST_DATA_LEN],
IRQ_TEST_DATA_LEN);
return actual_crc == state->expected_crcs[i];
}
/*
* Test that if CRCs are computed in task, softirq, and hardirq context
* concurrently, then all results are as expected.
*/
static void crc_interrupt_context_test(struct kunit *test,
const struct crc_variant *v)
{
struct crc_irq_test_state state = {
.v = v,
.initial_crc = generate_random_initial_crc(v),
};
for (int i = 0; i < IRQ_TEST_NUM_BUFFERS; i++) {
state.expected_crcs[i] = crc_ref(
v, state.initial_crc,
&test_buffer[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN);
}
kunit_run_irq_test(test, crc_irq_test_func, 100000, &state);
}
/* Test that v->func gives the same CRCs as a reference implementation. */
static void crc_test(struct kunit *test, const struct crc_variant *v)
{
@@ -149,7 +198,6 @@ static void crc_test(struct kunit *test, const struct crc_variant *v)
for (i = 0; i < CRC_KUNIT_NUM_TEST_ITERS; i++) {
u64 init_crc, expected_crc, actual_crc;
size_t len, offset;
-bool nosimd;
init_crc = generate_random_initial_crc(v);
len = generate_random_length(CRC_KUNIT_MAX_LEN);
@@ -168,22 +216,18 @@ static void crc_test(struct kunit *test, const struct crc_variant *v)
/* Refresh the data occasionally. */
prandom_bytes_state(&rng, &test_buffer[offset], len);
-nosimd = rand32() % 8 == 0;
/*
* Compute the CRC, and verify that it equals the CRC computed
* by a simple bit-at-a-time reference implementation.
*/
expected_crc = crc_ref(v, init_crc, &test_buffer[offset], len);
-if (nosimd)
-local_irq_disable();
actual_crc = v->func(init_crc, &test_buffer[offset], len);
-if (nosimd)
-local_irq_enable();
KUNIT_EXPECT_EQ_MSG(test, expected_crc, actual_crc,
"Wrong result with len=%zu offset=%zu nosimd=%d",
len, offset, nosimd);
"Wrong result with len=%zu offset=%zu",
len, offset);
}
+crc_interrupt_context_test(test, v);
}
static __always_inline void

View File

@@ -12,7 +12,6 @@
#include <asm/cpufeatures.h>
#include <asm/simd.h>
-#include <crypto/internal/simd.h>
#include <linux/static_call.h>
#include "crc-pclmul-consts.h"
@@ -57,7 +56,7 @@ static inline bool have_avx512(void)
#define CRC_PCLMUL(crc, p, len, prefix, consts, have_pclmulqdq) \
do { \
if ((len) >= 16 && static_branch_likely(&(have_pclmulqdq)) && \
-crypto_simd_usable()) { \
+likely(irq_fpu_usable())) { \
const void *consts_ptr; \
\
consts_ptr = (consts).fold_across_128_bits_consts; \

View File

@@ -19,7 +19,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *p, size_t len)
}
#define crc_t10dif_mod_init_arch crc_t10dif_mod_init_arch
-static inline void crc_t10dif_mod_init_arch(void)
+static void crc_t10dif_mod_init_arch(void)
{
if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
static_branch_enable(&have_pclmulqdq);

View File

@@ -44,7 +44,7 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
return crc32c_base(crc, p, len);
if (IS_ENABLED(CONFIG_X86_64) && len >= CRC32C_PCLMUL_BREAKEVEN &&
-static_branch_likely(&have_pclmulqdq) && crypto_simd_usable()) {
+static_branch_likely(&have_pclmulqdq) && likely(irq_fpu_usable())) {
/*
* Long length, the vector registers are usable, and the CPU is
* 64-bit and supports both CRC32 and PCLMULQDQ instructions.
@@ -106,7 +106,7 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
#define crc32_be_arch crc32_be_base /* not implemented on this arch */
#define crc32_mod_init_arch crc32_mod_init_arch
-static inline void crc32_mod_init_arch(void)
+static void crc32_mod_init_arch(void)
{
if (boot_cpu_has(X86_FEATURE_XMM4_2))
static_branch_enable(&have_crc32);

View File

@@ -27,7 +27,7 @@ static inline u64 crc64_nvme_arch(u64 crc, const u8 *p, size_t len)
}
#define crc64_mod_init_arch crc64_mod_init_arch
-static inline void crc64_mod_init_arch(void)
+static void crc64_mod_init_arch(void)
{
if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
static_branch_enable(&have_pclmulqdq);

View File

@@ -5,11 +5,9 @@
*
* Copyright 2025 Google LLC
*/
+#include <kunit/run-in-irq-context.h>
#include <kunit/test.h>
-#include <linux/hrtimer.h>
-#include <linux/timekeeping.h>
#include <linux/vmalloc.h>
-#include <linux/workqueue.h>
/* test_buf is a guarded buffer, i.e. &test_buf[TEST_BUF_LEN] is not mapped. */
#define TEST_BUF_LEN 16384
@@ -319,119 +317,6 @@ static void test_hash_ctx_zeroization(struct kunit *test)
"Hash context was not zeroized by finalization");
}
#define IRQ_TEST_HRTIMER_INTERVAL us_to_ktime(5)
struct hash_irq_test_state {
bool (*func)(void *test_specific_state);
void *test_specific_state;
bool task_func_reported_failure;
bool hardirq_func_reported_failure;
bool softirq_func_reported_failure;
unsigned long hardirq_func_calls;
unsigned long softirq_func_calls;
struct hrtimer timer;
struct work_struct bh_work;
};
static enum hrtimer_restart hash_irq_test_timer_func(struct hrtimer *timer)
{
struct hash_irq_test_state *state =
container_of(timer, typeof(*state), timer);
WARN_ON_ONCE(!in_hardirq());
state->hardirq_func_calls++;
if (!state->func(state->test_specific_state))
state->hardirq_func_reported_failure = true;
hrtimer_forward_now(&state->timer, IRQ_TEST_HRTIMER_INTERVAL);
queue_work(system_bh_wq, &state->bh_work);
return HRTIMER_RESTART;
}
static void hash_irq_test_bh_work_func(struct work_struct *work)
{
struct hash_irq_test_state *state =
container_of(work, typeof(*state), bh_work);
WARN_ON_ONCE(!in_serving_softirq());
state->softirq_func_calls++;
if (!state->func(state->test_specific_state))
state->softirq_func_reported_failure = true;
}
/*
* Helper function which repeatedly runs the given @func in task, softirq, and
* hardirq context concurrently, and reports a failure to KUnit if any
* invocation of @func in any context returns false. @func is passed
* @test_specific_state as its argument. At most 3 invocations of @func will
* run concurrently: one in each of task, softirq, and hardirq context.
*
* The main purpose of this interrupt context testing is to validate fallback
* code paths that run in contexts where the normal code path cannot be used,
* typically due to the FPU or vector registers already being in-use in kernel
* mode. These code paths aren't covered when the test code is executed only by
* the KUnit test runner thread in task context. The reason for the concurrency
* is because merely using hardirq context is not sufficient to reach a fallback
* code path on some architectures; the hardirq actually has to occur while the
* FPU or vector unit was already in-use in kernel mode.
*
* Another purpose of this testing is to detect issues with the architecture's
* irq_fpu_usable() and kernel_fpu_begin/end() or equivalent functions,
* especially in softirq context when the softirq may have interrupted a task
* already using kernel-mode FPU or vector (if the arch didn't prevent that).
* Crypto functions are often executed in softirqs, so this is important.
*/
static void run_irq_test(struct kunit *test, bool (*func)(void *),
int max_iterations, void *test_specific_state)
{
struct hash_irq_test_state state = {
.func = func,
.test_specific_state = test_specific_state,
};
unsigned long end_jiffies;
/*
* Set up a hrtimer (the way we access hardirq context) and a work
* struct for the BH workqueue (the way we access softirq context).
*/
hrtimer_setup_on_stack(&state.timer, hash_irq_test_timer_func,
CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
INIT_WORK_ONSTACK(&state.bh_work, hash_irq_test_bh_work_func);
/* Run for up to max_iterations or 1 second, whichever comes first. */
end_jiffies = jiffies + HZ;
hrtimer_start(&state.timer, IRQ_TEST_HRTIMER_INTERVAL,
HRTIMER_MODE_REL_HARD);
for (int i = 0; i < max_iterations && !time_after(jiffies, end_jiffies);
i++) {
if (!func(test_specific_state))
state.task_func_reported_failure = true;
}
/* Cancel the timer and work. */
hrtimer_cancel(&state.timer);
flush_work(&state.bh_work);
/* Sanity check: the timer and BH functions should have been run. */
KUNIT_EXPECT_GT_MSG(test, state.hardirq_func_calls, 0,
"Timer function was not called");
KUNIT_EXPECT_GT_MSG(test, state.softirq_func_calls, 0,
"BH work function was not called");
/* Check for incorrect hash values reported from any context. */
KUNIT_EXPECT_FALSE_MSG(
test, state.task_func_reported_failure,
"Incorrect hash values reported from task context");
KUNIT_EXPECT_FALSE_MSG(
test, state.hardirq_func_reported_failure,
"Incorrect hash values reported from hardirq context");
KUNIT_EXPECT_FALSE_MSG(
test, state.softirq_func_reported_failure,
"Incorrect hash values reported from softirq context");
}
#define IRQ_TEST_DATA_LEN 256
#define IRQ_TEST_NUM_BUFFERS 3 /* matches max concurrency level */
@@ -469,7 +354,7 @@ static void test_hash_interrupt_context_1(struct kunit *test)
HASH(&test_buf[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN,
state.expected_hashes[i]);
-run_irq_test(test, hash_irq_test1_func, 100000, &state);
+kunit_run_irq_test(test, hash_irq_test1_func, 100000, &state);
}
struct hash_irq_test2_hash_ctx {
@@ -500,7 +385,7 @@ static bool hash_irq_test2_func(void *state_)
if (WARN_ON_ONCE(ctx == &state->ctxs[ARRAY_SIZE(state->ctxs)])) {
/*
* This should never happen, as the number of contexts is equal
-* to the maximum concurrency level of run_irq_test().
+* to the maximum concurrency level of kunit_run_irq_test().
*/
return false;
}
@@ -566,7 +451,7 @@ static void test_hash_interrupt_context_2(struct kunit *test)
state->update_lens[state->num_steps++] = remaining;
state->num_steps += 2; /* for init and final */
-run_irq_test(test, hash_irq_test2_func, 250000, state);
+kunit_run_irq_test(test, hash_irq_test2_func, 250000, state);
}
#define UNKEYED_HASH_KUNIT_CASES \