Merge patch series "RISC-V: Detect and report speed of unaligned vector accesses"

Charlie Jenkins <charlie@rivosinc.com> says:

Adds support for detecting and reporting the speed of unaligned vector
accesses on RISC-V CPUs. Adds the vec_misaligned_speed key to hwprobe,
adds Zicclsm to cpufeature, and fixes the check for scalar unaligned
access emulation on all CPUs. The vec_misaligned_speed key keeps the
same format as the scalar unaligned access speed key.

This set does not emulate unaligned vector accesses on CPUs that do not
support them. It only reports whether userspace can run them, and the
speed of unaligned vector accesses when they are supported.

* b4-shazam-merge:
  RISC-V: hwprobe: Document unaligned vector perf key
  RISC-V: Report vector unaligned access speed hwprobe
  RISC-V: Detect unaligned vector accesses supported
  RISC-V: Replace RISCV_MISALIGNED with RISCV_SCALAR_MISALIGNED
  RISC-V: Scalar unaligned access emulated on hotplug CPUs
  RISC-V: Check scalar unaligned access on all CPUs

Link: https://lore.kernel.org/r/20241017-jesse_unaligned_vector-v10-0-5b33500160f8@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Committed by Palmer Dabbelt, 2024-10-18 12:38:36 -07:00
15 changed files with 476 additions and 40 deletions


@@ -274,3 +274,19 @@ The following keys are defined:
represent the highest userspace virtual address usable.
* :c:macro:`RISCV_HWPROBE_KEY_TIME_CSR_FREQ`: Frequency (in Hz) of `time CSR`.
* :c:macro:`RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF`: An enum value describing the
performance of misaligned vector accesses on the selected set of processors.
* :c:macro:`RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN`: The performance of misaligned
vector accesses is unknown.
* :c:macro:`RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW`: 32-bit misaligned accesses using vector
registers are slower than the equivalent quantity of byte accesses via vector registers.
Misaligned accesses may be supported directly in hardware, or trapped and emulated by software.
* :c:macro:`RISCV_HWPROBE_MISALIGNED_VECTOR_FAST`: 32-bit misaligned accesses using vector
registers are faster than the equivalent quantity of byte accesses via vector registers.
* :c:macro:`RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED`: Misaligned vector accesses are
not supported at all and will generate a misaligned address fault.


@@ -786,10 +786,24 @@ config THREAD_SIZE_ORDER
config RISCV_MISALIGNED
bool
help
Embed support for detecting and emulating misaligned
scalar or vector loads and stores.
config RISCV_SCALAR_MISALIGNED
bool
select RISCV_MISALIGNED
select SYSCTL_ARCH_UNALIGN_ALLOW
help
Embed support for emulating misaligned loads and stores.
config RISCV_VECTOR_MISALIGNED
bool
select RISCV_MISALIGNED
depends on RISCV_ISA_V
help
Enable detecting support for vector misaligned loads and stores.
choice
prompt "Unaligned Accesses Support"
default RISCV_PROBE_UNALIGNED_ACCESS
@@ -801,7 +815,7 @@ choice
config RISCV_PROBE_UNALIGNED_ACCESS
bool "Probe for hardware unaligned access support"
-select RISCV_MISALIGNED
+select RISCV_SCALAR_MISALIGNED
help
During boot, the kernel will run a series of tests to determine the
speed of unaligned accesses. This probing will dynamically determine
@@ -812,7 +826,7 @@ config RISCV_PROBE_UNALIGNED_ACCESS
config RISCV_EMULATED_UNALIGNED_ACCESS
bool "Emulate unaligned access where system support is missing"
-select RISCV_MISALIGNED
+select RISCV_SCALAR_MISALIGNED
help
If unaligned memory accesses trap into the kernel as they are not
supported by the system, the kernel will emulate the unaligned
@@ -841,6 +855,46 @@ config RISCV_EFFICIENT_UNALIGNED_ACCESS
endchoice
choice
prompt "Vector unaligned Accesses Support"
depends on RISCV_ISA_V
default RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
help
This determines the level of support for vector unaligned accesses. This
information is used by the kernel to perform optimizations. It is also
exposed to user space via the hwprobe syscall. The hardware will be
probed at boot by default.
config RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
bool "Probe speed of vector unaligned accesses"
select RISCV_VECTOR_MISALIGNED
depends on RISCV_ISA_V
help
During boot, the kernel will run a series of tests to determine the
speed of vector unaligned accesses if they are supported. This probing
will dynamically determine the speed of vector unaligned accesses on
the underlying system if they are supported.
config RISCV_SLOW_VECTOR_UNALIGNED_ACCESS
bool "Assume the system supports slow vector unaligned memory accesses"
depends on NONPORTABLE
help
Assume that the system supports slow vector unaligned memory accesses. The
kernel and userspace programs may not be able to run at all on systems
that do not support unaligned memory accesses.
config RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
bool "Assume the system supports fast vector unaligned memory accesses"
depends on NONPORTABLE
help
Assume that the system supports fast vector unaligned memory accesses. When
enabled, this option improves the performance of the kernel on such
systems. However, the kernel and userspace programs will run much more
slowly, or will not be able to run at all, on systems that do not
support efficient unaligned memory accesses.
endchoice
source "arch/riscv/Kconfig.vendor"
endmenu # "Platform type"


@@ -8,6 +8,7 @@
#include <linux/bitmap.h>
#include <linux/jump_label.h>
#include <linux/workqueue.h>
#include <asm/hwcap.h>
#include <asm/alternative-macros.h>
#include <asm/errno.h>
@@ -58,8 +59,9 @@ void __init riscv_user_isa_enable(void);
#define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \
_RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
-#if defined(CONFIG_RISCV_MISALIGNED)
bool check_unaligned_access_emulated_all_cpus(void);
+#if defined(CONFIG_RISCV_SCALAR_MISALIGNED)
+void check_unaligned_access_emulated(struct work_struct *work __always_unused);
void unaligned_emulation_finish(void);
bool unaligned_ctl_available(void);
DECLARE_PER_CPU(long, misaligned_access_speed);
@@ -70,6 +72,12 @@ static inline bool unaligned_ctl_available(void)
}
#endif
bool check_vector_unaligned_access_emulated_all_cpus(void);
#if defined(CONFIG_RISCV_VECTOR_MISALIGNED)
void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused);
DECLARE_PER_CPU(long, vector_misaligned_access);
#endif
#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
DECLARE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key);


@@ -25,18 +25,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
void handle_page_fault(struct pt_regs *regs);
void handle_break(struct pt_regs *regs);
#ifdef CONFIG_RISCV_MISALIGNED
int handle_misaligned_load(struct pt_regs *regs);
int handle_misaligned_store(struct pt_regs *regs);
#else
static inline int handle_misaligned_load(struct pt_regs *regs)
{
return -1;
}
static inline int handle_misaligned_store(struct pt_regs *regs)
{
return -1;
}
#endif
#endif /* _ASM_RISCV_ENTRY_COMMON_H */


@@ -8,7 +8,7 @@
#include <uapi/asm/hwprobe.h>
-#define RISCV_HWPROBE_MAX_KEY 9
+#define RISCV_HWPROBE_MAX_KEY 10
static inline bool riscv_hwprobe_key_is_valid(__s64 key)
{


@@ -21,6 +21,7 @@
extern unsigned long riscv_v_vsize;
int riscv_v_setup_vsize(void);
bool insn_is_vector(u32 insn_buf);
bool riscv_v_first_use_handler(struct pt_regs *regs);
void kernel_vector_begin(void);
void kernel_vector_end(void);
@@ -268,6 +269,7 @@ struct pt_regs;
static inline int riscv_v_setup_vsize(void) { return -EOPNOTSUPP; }
static __always_inline bool has_vector(void) { return false; }
static __always_inline bool insn_is_vector(u32 insn_buf) { return false; }
static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }


@@ -88,6 +88,11 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW 2
#define RISCV_HWPROBE_MISALIGNED_SCALAR_FAST 3
#define RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED 4
#define RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF 10
#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN 0
#define RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW 2
#define RISCV_HWPROBE_MISALIGNED_VECTOR_FAST 3
#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED 4
/* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
/* Flags */


@@ -70,7 +70,8 @@ obj-$(CONFIG_MMU) += vdso.o vdso/
obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o
obj-$(CONFIG_RISCV_MISALIGNED) += unaligned_access_speed.o
obj-$(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) += copy-unaligned.o
obj-$(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS) += vec-copy-unaligned.o
obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_FPU) += kernel_mode_fpu.o


@@ -10,4 +10,9 @@
void __riscv_copy_words_unaligned(void *dst, const void *src, size_t size);
void __riscv_copy_bytes_unaligned(void *dst, const void *src, size_t size);
#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
void __riscv_copy_vec_words_unaligned(void *dst, const void *src, size_t size);
void __riscv_copy_vec_bytes_unaligned(void *dst, const void *src, size_t size);
#endif
#endif /* __RISCV_KERNEL_COPY_UNALIGNED_H */


@@ -170,7 +170,7 @@ SYM_FUNC_END(__fstate_restore)
__access_func(f31)
-#ifdef CONFIG_RISCV_MISALIGNED
+#ifdef CONFIG_RISCV_SCALAR_MISALIGNED
/*
* Disable compressed instructions set to keep a constant offset between FP
@@ -224,4 +224,4 @@ SYM_FUNC_START(get_f64_reg)
fp_access_epilogue
SYM_FUNC_END(get_f64_reg)
-#endif /* CONFIG_RISCV_MISALIGNED */
+#endif /* CONFIG_RISCV_SCALAR_MISALIGNED */


@@ -201,6 +201,43 @@ static u64 hwprobe_misaligned(const struct cpumask *cpus)
}
#endif
#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
int cpu;
u64 perf = -1ULL;
/* Return if supported or not even if speed wasn't probed */
for_each_cpu(cpu, cpus) {
int this_perf = per_cpu(vector_misaligned_access, cpu);
if (perf == -1ULL)
perf = this_perf;
if (perf != this_perf) {
perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
break;
}
}
if (perf == -1ULL)
return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
return perf;
}
#else
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS))
return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;
if (IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS))
return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;
return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
}
#endif
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
const struct cpumask *cpus)
{
@@ -229,6 +266,10 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair,
pair->value = hwprobe_misaligned(cpus);
break;
case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF:
pair->value = hwprobe_vec_misaligned(cpus);
break;
case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
pair->value = 0;
if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))


@@ -16,6 +16,7 @@
#include <asm/entry-common.h>
#include <asm/hwprobe.h>
#include <asm/cpufeature.h>
#include <asm/vector.h>
#define INSN_MATCH_LB 0x3
#define INSN_MASK_LB 0x707f
@@ -322,12 +323,37 @@ union reg_data {
u64 data_u64;
};
-static bool unaligned_ctl __read_mostly;
/* sysctl hooks */
int unaligned_enabled __read_mostly = 1; /* Enabled by default */
-int handle_misaligned_load(struct pt_regs *regs)
#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
static int handle_vector_misaligned_load(struct pt_regs *regs)
{
unsigned long epc = regs->epc;
unsigned long insn;
if (get_insn(regs, epc, &insn))
return -1;
/* Only return 0 when in check_vector_unaligned_access_emulated */
if (*this_cpu_ptr(&vector_misaligned_access) == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) {
*this_cpu_ptr(&vector_misaligned_access) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
regs->epc = epc + INSN_LEN(insn);
return 0;
}
/* If vector instruction we don't emulate it yet */
regs->epc = epc;
return -1;
}
#else
static int handle_vector_misaligned_load(struct pt_regs *regs)
{
return -1;
}
#endif
+static int handle_scalar_misaligned_load(struct pt_regs *regs)
{
union reg_data val;
unsigned long epc = regs->epc;
@@ -435,7 +461,7 @@ int handle_misaligned_load(struct pt_regs *regs)
return 0;
}
-int handle_misaligned_store(struct pt_regs *regs)
+static int handle_scalar_misaligned_store(struct pt_regs *regs)
{
union reg_data val;
unsigned long epc = regs->epc;
@@ -526,11 +552,96 @@ int handle_misaligned_store(struct pt_regs *regs)
return 0;
}
-static bool check_unaligned_access_emulated(int cpu)
+int handle_misaligned_load(struct pt_regs *regs)
{
unsigned long epc = regs->epc;
unsigned long insn;
if (IS_ENABLED(CONFIG_RISCV_VECTOR_MISALIGNED)) {
if (get_insn(regs, epc, &insn))
return -1;
if (insn_is_vector(insn))
return handle_vector_misaligned_load(regs);
}
if (IS_ENABLED(CONFIG_RISCV_SCALAR_MISALIGNED))
return handle_scalar_misaligned_load(regs);
return -1;
}
int handle_misaligned_store(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_RISCV_SCALAR_MISALIGNED))
return handle_scalar_misaligned_store(regs);
return -1;
}
#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused)
{
long *mas_ptr = this_cpu_ptr(&vector_misaligned_access);
unsigned long tmp_var;
*mas_ptr = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
kernel_vector_begin();
/*
* In pre-13.0.0 versions of GCC, vector registers cannot appear in
* the clobber list. This inline asm clobbers v0, but since we do not
* currently build the kernel with V enabled, the v0 clobber arg is not
* needed (as the compiler will not emit vector code itself). If the kernel
* is changed to build with V enabled, the clobber arg will need to be
* added here.
*/
__asm__ __volatile__ (
".balign 4\n\t"
".option push\n\t"
".option arch, +zve32x\n\t"
" vsetivli zero, 1, e16, m1, ta, ma\n\t" // Vectors of 16b
" vle16.v v0, (%[ptr])\n\t" // Load bytes
".option pop\n\t"
: : [ptr] "r" ((u8 *)&tmp_var + 1));
kernel_vector_end();
}
bool check_vector_unaligned_access_emulated_all_cpus(void)
{
int cpu;
if (!has_vector()) {
for_each_online_cpu(cpu)
per_cpu(vector_misaligned_access, cpu) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
return false;
}
schedule_on_each_cpu(check_vector_unaligned_access_emulated);
for_each_online_cpu(cpu)
if (per_cpu(vector_misaligned_access, cpu)
== RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN)
return false;
return true;
}
#else
bool check_vector_unaligned_access_emulated_all_cpus(void)
{
return false;
}
#endif
#ifdef CONFIG_RISCV_SCALAR_MISALIGNED
static bool unaligned_ctl __read_mostly;
void check_unaligned_access_emulated(struct work_struct *work __always_unused)
{
int cpu = smp_processor_id();
long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
unsigned long tmp_var, tmp_val;
bool misaligned_emu_detected;
*mas_ptr = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
@@ -538,19 +649,16 @@ static bool check_unaligned_access_emulated(int cpu)
" "REG_L" %[tmp], 1(%[ptr])\n"
: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED);
/*
* If unaligned_ctl is already set, this means that we detected that all
* CPUS uses emulated misaligned access at boot time. If that changed
* when hotplugging the new cpu, this is something we don't handle.
*/
-if (unlikely(unaligned_ctl && !misaligned_emu_detected)) {
+if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) {
pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
while (true)
cpu_relax();
}
return misaligned_emu_detected;
}
bool check_unaligned_access_emulated_all_cpus(void)
@@ -562,8 +670,11 @@ bool check_unaligned_access_emulated_all_cpus(void)
* accesses emulated since tasks requesting such control can run on any
* CPU.
*/
+schedule_on_each_cpu(check_unaligned_access_emulated);
for_each_online_cpu(cpu)
-if (!check_unaligned_access_emulated(cpu))
+if (per_cpu(misaligned_access_speed, cpu)
+    != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
return false;
unaligned_ctl = true;
@@ -574,3 +685,9 @@ bool unaligned_ctl_available(void)
{
return unaligned_ctl;
}
#else
bool check_unaligned_access_emulated_all_cpus(void)
{
return false;
}
#endif


@@ -6,11 +6,13 @@
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/jump_label.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/vector.h>
#include "copy-unaligned.h"
@@ -19,7 +21,8 @@
#define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE)
#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
-DEFINE_PER_CPU(long, misaligned_access_speed);
+DEFINE_PER_CPU(long, misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
+DEFINE_PER_CPU(long, vector_misaligned_access) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
static cpumask_t fast_misaligned_access;
@@ -191,6 +194,7 @@ static int riscv_online_cpu(unsigned int cpu)
if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN)
goto exit;
check_unaligned_access_emulated(NULL);
buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
if (!buf) {
pr_warn("Allocation failure, not measuring misaligned performance\n");
@@ -259,23 +263,159 @@ static int check_unaligned_access_speed_all_cpus(void)
kfree(bufs);
return 0;
}
#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */
static int check_unaligned_access_speed_all_cpus(void)
{
return 0;
}
#endif
#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
static void check_vector_unaligned_access(struct work_struct *work __always_unused)
{
int cpu = smp_processor_id();
u64 start_cycles, end_cycles;
u64 word_cycles;
u64 byte_cycles;
int ratio;
unsigned long start_jiffies, now;
struct page *page;
void *dst;
void *src;
long speed = RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;
if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN)
return;
page = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
if (!page) {
pr_warn("Allocation failure, not measuring vector misaligned performance\n");
return;
}
/* Make an unaligned destination buffer. */
dst = (void *)((unsigned long)page_address(page) | 0x1);
/* Unalign src as well, but differently (off by 1 + 2 = 3). */
src = dst + (MISALIGNED_BUFFER_SIZE / 2);
src += 2;
word_cycles = -1ULL;
/* Do a warmup. */
kernel_vector_begin();
__riscv_copy_vec_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
start_jiffies = jiffies;
while ((now = jiffies) == start_jiffies)
cpu_relax();
/*
* For a fixed amount of time, repeatedly try the function, and take
* the best time in cycles as the measurement.
*/
while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
start_cycles = get_cycles64();
/* Ensure the CSR read can't reorder WRT to the copy. */
mb();
__riscv_copy_vec_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
/* Ensure the copy ends before the end time is snapped. */
mb();
end_cycles = get_cycles64();
if ((end_cycles - start_cycles) < word_cycles)
word_cycles = end_cycles - start_cycles;
}
byte_cycles = -1ULL;
__riscv_copy_vec_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
start_jiffies = jiffies;
while ((now = jiffies) == start_jiffies)
cpu_relax();
while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
start_cycles = get_cycles64();
/* Ensure the CSR read can't reorder WRT to the copy. */
mb();
__riscv_copy_vec_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
/* Ensure the copy ends before the end time is snapped. */
mb();
end_cycles = get_cycles64();
if ((end_cycles - start_cycles) < byte_cycles)
byte_cycles = end_cycles - start_cycles;
}
kernel_vector_end();
/* Don't divide by zero. */
if (!word_cycles || !byte_cycles) {
pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned vector access speed\n",
cpu);
return;
}
if (word_cycles < byte_cycles)
speed = RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;
ratio = div_u64((byte_cycles * 100), word_cycles);
pr_info("cpu%d: Ratio of vector byte access time to vector unaligned word access is %d.%02d, unaligned accesses are %s\n",
cpu,
ratio / 100,
ratio % 100,
(speed == RISCV_HWPROBE_MISALIGNED_VECTOR_FAST) ? "fast" : "slow");
per_cpu(vector_misaligned_access, cpu) = speed;
}
static int riscv_online_cpu_vec(unsigned int cpu)
{
if (!has_vector())
return 0;
if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED)
return 0;
check_vector_unaligned_access_emulated(NULL);
check_vector_unaligned_access(NULL);
return 0;
}
/* Measure unaligned access speed on all CPUs present at boot in parallel. */
static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
{
schedule_on_each_cpu(check_vector_unaligned_access);
/*
* Setup hotplug callbacks for any new CPUs that come online or go
* offline.
*/
cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
riscv_online_cpu_vec, NULL);
return 0;
}
#else /* CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS */
static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
{
return 0;
}
#endif
static int check_unaligned_access_all_cpus(void)
{
bool all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
bool all_cpus_emulated, all_cpus_vec_unsupported;
all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
all_cpus_vec_unsupported = check_vector_unaligned_access_emulated_all_cpus();
if (!all_cpus_vec_unsupported &&
IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
kthread_run(vec_check_unaligned_access_speed_all_cpus,
NULL, "vec_check_unaligned_access_speed_all_cpus");
}
if (!all_cpus_emulated)
return check_unaligned_access_speed_all_cpus();
return 0;
}
#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */
static int check_unaligned_access_all_cpus(void)
{
check_unaligned_access_emulated_all_cpus();
return 0;
}
#endif
arch_initcall(check_unaligned_access_all_cpus);


@@ -0,0 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2024 Rivos Inc. */
#include <linux/args.h>
#include <linux/linkage.h>
#include <asm/asm.h>
.text
#define WORD_EEW 32
#define WORD_SEW CONCATENATE(e, WORD_EEW)
#define VEC_L CONCATENATE(vle, WORD_EEW).v
#define VEC_S CONCATENATE(vse, WORD_EEW).v
/* void __riscv_copy_vec_words_unaligned(void *, const void *, size_t) */
/* Performs a memcpy without aligning buffers, using word loads and stores. */
/* Note: The size is truncated to a multiple of WORD_EEW */
SYM_FUNC_START(__riscv_copy_vec_words_unaligned)
andi a4, a2, ~(WORD_EEW-1)
beqz a4, 2f
add a3, a1, a4
.option push
.option arch, +zve32x
1:
vsetivli t0, 8, WORD_SEW, m8, ta, ma
VEC_L v0, (a1)
VEC_S v0, (a0)
addi a0, a0, WORD_EEW
addi a1, a1, WORD_EEW
bltu a1, a3, 1b
2:
.option pop
ret
SYM_FUNC_END(__riscv_copy_vec_words_unaligned)
/* void __riscv_copy_vec_bytes_unaligned(void *, const void *, size_t) */
/* Performs a memcpy without aligning buffers, using only byte accesses. */
/* Note: The size is truncated to a multiple of 8 */
SYM_FUNC_START(__riscv_copy_vec_bytes_unaligned)
andi a4, a2, ~(8-1)
beqz a4, 2f
add a3, a1, a4
.option push
.option arch, +zve32x
1:
vsetivli t0, 8, e8, m8, ta, ma
vle8.v v0, (a1)
vse8.v v0, (a0)
addi a0, a0, 8
addi a1, a1, 8
bltu a1, a3, 1b
2:
.option pop
ret
SYM_FUNC_END(__riscv_copy_vec_bytes_unaligned)


@@ -66,7 +66,7 @@ void __init riscv_v_setup_ctx_cache(void)
#endif
}
-static bool insn_is_vector(u32 insn_buf)
+bool insn_is_vector(u32 insn_buf)
{
u32 opcode = insn_buf & __INSN_OPCODE_MASK;
u32 width, csr;