/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_HWCAP_H
#define __ASM_HWCAP_H

#include <uapi/asm/hwcap.h>
#include <asm/cpufeature.h>

#define COMPAT_HWCAP_SWP (1 << 0)
#define COMPAT_HWCAP_HALF (1 << 1)
#define COMPAT_HWCAP_THUMB (1 << 2)
#define COMPAT_HWCAP_26BIT (1 << 3)
#define COMPAT_HWCAP_FAST_MULT (1 << 4)
#define COMPAT_HWCAP_FPA (1 << 5)
#define COMPAT_HWCAP_VFP (1 << 6)
#define COMPAT_HWCAP_EDSP (1 << 7)
#define COMPAT_HWCAP_JAVA (1 << 8)
#define COMPAT_HWCAP_IWMMXT (1 << 9)
#define COMPAT_HWCAP_CRUNCH (1 << 10) /* Obsolete */
#define COMPAT_HWCAP_THUMBEE (1 << 11)
#define COMPAT_HWCAP_NEON (1 << 12)
#define COMPAT_HWCAP_VFPv3 (1 << 13)
#define COMPAT_HWCAP_VFPV3D16 (1 << 14)
#define COMPAT_HWCAP_TLS (1 << 15)
#define COMPAT_HWCAP_VFPv4 (1 << 16)
#define COMPAT_HWCAP_IDIVA (1 << 17)
#define COMPAT_HWCAP_IDIVT (1 << 18)
#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
#define COMPAT_HWCAP_VFPD32 (1 << 19)
#define COMPAT_HWCAP_LPAE (1 << 20)
#define COMPAT_HWCAP_EVTSTRM (1 << 21)
#define COMPAT_HWCAP_FPHP (1 << 22)
#define COMPAT_HWCAP_ASIMDHP (1 << 23)
#define COMPAT_HWCAP_ASIMDDP (1 << 24)
#define COMPAT_HWCAP_ASIMDFHM (1 << 25)
#define COMPAT_HWCAP_ASIMDBF16 (1 << 26)
#define COMPAT_HWCAP_I8MM (1 << 27)

#define COMPAT_HWCAP2_AES (1 << 0)
#define COMPAT_HWCAP2_PMULL (1 << 1)
#define COMPAT_HWCAP2_SHA1 (1 << 2)
#define COMPAT_HWCAP2_SHA2 (1 << 3)
#define COMPAT_HWCAP2_CRC32 (1 << 4)
#define COMPAT_HWCAP2_SB (1 << 5)
#define COMPAT_HWCAP2_SSBS (1 << 6)
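
/*
 * Note: the COMPAT_HWCAP{,2}_x values above mirror the AArch32 hwcap bits
 * and are what a 32-bit (compat) task is shown through COMPAT_ELF_HWCAP
 * and COMPAT_ELF_HWCAP2 further down in this header.
 */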

#ifndef __ASSEMBLY__
#include <linux/log2.h>

/*
 * For userspace we represent hwcaps as a collection of HWCAP{,2}_x bitfields
 * as described in uapi/asm/hwcap.h. For the kernel we represent hwcaps as
 * natural numbers (in a single range of size MAX_CPU_FEATURES) defined here
 * with prefix KERNEL_HWCAP_ mapped to their HWCAP{,2}_x counterpart.
 *
 * Hwcaps should be set and tested within the kernel via the
 * cpu_{set,have}_named_feature(feature) where feature is the unique suffix
 * of KERNEL_HWCAP_{feature}.
 */
#define __khwcap_feature(x) const_ilog2(HWCAP_ ## x)
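
/*
 * For example (illustrative only): KERNEL_HWCAP_FP below expands to
 * const_ilog2(HWCAP_FP), i.e. the bit index of the userspace HWCAP_FP
 * flag, and in-kernel users test/set it via the named helpers mentioned
 * above rather than via the raw number:
 *
 *	if (cpu_have_named_feature(FP))
 *		do_something();
 *	cpu_set_named_feature(FP);
 */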
#define KERNEL_HWCAP_FP __khwcap_feature(FP)
#define KERNEL_HWCAP_ASIMD __khwcap_feature(ASIMD)
#define KERNEL_HWCAP_EVTSTRM __khwcap_feature(EVTSTRM)
#define KERNEL_HWCAP_AES __khwcap_feature(AES)
#define KERNEL_HWCAP_PMULL __khwcap_feature(PMULL)
#define KERNEL_HWCAP_SHA1 __khwcap_feature(SHA1)
#define KERNEL_HWCAP_SHA2 __khwcap_feature(SHA2)
#define KERNEL_HWCAP_CRC32 __khwcap_feature(CRC32)
#define KERNEL_HWCAP_ATOMICS __khwcap_feature(ATOMICS)
#define KERNEL_HWCAP_FPHP __khwcap_feature(FPHP)
#define KERNEL_HWCAP_ASIMDHP __khwcap_feature(ASIMDHP)
#define KERNEL_HWCAP_CPUID __khwcap_feature(CPUID)
#define KERNEL_HWCAP_ASIMDRDM __khwcap_feature(ASIMDRDM)
#define KERNEL_HWCAP_JSCVT __khwcap_feature(JSCVT)
#define KERNEL_HWCAP_FCMA __khwcap_feature(FCMA)
#define KERNEL_HWCAP_LRCPC __khwcap_feature(LRCPC)
#define KERNEL_HWCAP_DCPOP __khwcap_feature(DCPOP)
#define KERNEL_HWCAP_SHA3 __khwcap_feature(SHA3)
#define KERNEL_HWCAP_SM3 __khwcap_feature(SM3)
#define KERNEL_HWCAP_SM4 __khwcap_feature(SM4)
#define KERNEL_HWCAP_ASIMDDP __khwcap_feature(ASIMDDP)
#define KERNEL_HWCAP_SHA512 __khwcap_feature(SHA512)
#define KERNEL_HWCAP_SVE __khwcap_feature(SVE)
#define KERNEL_HWCAP_ASIMDFHM __khwcap_feature(ASIMDFHM)
#define KERNEL_HWCAP_DIT __khwcap_feature(DIT)
#define KERNEL_HWCAP_USCAT __khwcap_feature(USCAT)
#define KERNEL_HWCAP_ILRCPC __khwcap_feature(ILRCPC)
#define KERNEL_HWCAP_FLAGM __khwcap_feature(FLAGM)
#define KERNEL_HWCAP_SSBS __khwcap_feature(SSBS)
#define KERNEL_HWCAP_SB __khwcap_feature(SB)
#define KERNEL_HWCAP_PACA __khwcap_feature(PACA)
#define KERNEL_HWCAP_PACG __khwcap_feature(PACG)
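
/*
 * Features advertised through HWCAP2 are given kernel feature numbers
 * starting at 64 (note the "+ 64" below), keeping them in a numbering
 * range distinct from the HWCAP features above; e.g. KERNEL_HWCAP_SVE2
 * works out to 64 + const_ilog2(HWCAP2_SVE2).
 */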
#define __khwcap2_feature(x) (const_ilog2(HWCAP2_ ## x) + 64)
#define KERNEL_HWCAP_DCPODP __khwcap2_feature(DCPODP)
#define KERNEL_HWCAP_SVE2 __khwcap2_feature(SVE2)
#define KERNEL_HWCAP_SVEAES __khwcap2_feature(SVEAES)
#define KERNEL_HWCAP_SVEPMULL __khwcap2_feature(SVEPMULL)
#define KERNEL_HWCAP_SVEBITPERM __khwcap2_feature(SVEBITPERM)
#define KERNEL_HWCAP_SVESHA3 __khwcap2_feature(SVESHA3)
#define KERNEL_HWCAP_SVESM4 __khwcap2_feature(SVESM4)
#define KERNEL_HWCAP_FLAGM2 __khwcap2_feature(FLAGM2)
#define KERNEL_HWCAP_FRINT __khwcap2_feature(FRINT)
#define KERNEL_HWCAP_SVEI8MM __khwcap2_feature(SVEI8MM)
#define KERNEL_HWCAP_SVEF32MM __khwcap2_feature(SVEF32MM)
#define KERNEL_HWCAP_SVEF64MM __khwcap2_feature(SVEF64MM)
#define KERNEL_HWCAP_SVEBF16 __khwcap2_feature(SVEBF16)
#define KERNEL_HWCAP_I8MM __khwcap2_feature(I8MM)
#define KERNEL_HWCAP_BF16 __khwcap2_feature(BF16)
#define KERNEL_HWCAP_DGH __khwcap2_feature(DGH)
#define KERNEL_HWCAP_RNG __khwcap2_feature(RNG)
#define KERNEL_HWCAP_BTI __khwcap2_feature(BTI)
#define KERNEL_HWCAP_MTE __khwcap2_feature(MTE)
#define KERNEL_HWCAP_ECV __khwcap2_feature(ECV)
#define KERNEL_HWCAP_AFP __khwcap2_feature(AFP)
#define KERNEL_HWCAP_RPRES __khwcap2_feature(RPRES)
#define KERNEL_HWCAP_MTE3 __khwcap2_feature(MTE3)
#define KERNEL_HWCAP_SME __khwcap2_feature(SME)
#define KERNEL_HWCAP_SME_I16I64 __khwcap2_feature(SME_I16I64)
#define KERNEL_HWCAP_SME_F64F64 __khwcap2_feature(SME_F64F64)
#define KERNEL_HWCAP_SME_I8I32 __khwcap2_feature(SME_I8I32)
#define KERNEL_HWCAP_SME_F16F32 __khwcap2_feature(SME_F16F32)
#define KERNEL_HWCAP_SME_B16F32 __khwcap2_feature(SME_B16F32)
#define KERNEL_HWCAP_SME_F32F32 __khwcap2_feature(SME_F32F32)
#define KERNEL_HWCAP_SME_FA64 __khwcap2_feature(SME_FA64)
#define KERNEL_HWCAP_WFXT __khwcap2_feature(WFXT)
#define KERNEL_HWCAP_EBF16 __khwcap2_feature(EBF16)
#define KERNEL_HWCAP_SVE_EBF16 __khwcap2_feature(SVE_EBF16)
#define KERNEL_HWCAP_CSSC __khwcap2_feature(CSSC)
#define KERNEL_HWCAP_RPRFM __khwcap2_feature(RPRFM)
#define KERNEL_HWCAP_SVE2P1 __khwcap2_feature(SVE2P1)
#define KERNEL_HWCAP_SME2 __khwcap2_feature(SME2)
#define KERNEL_HWCAP_SME2P1 __khwcap2_feature(SME2P1)
#define KERNEL_HWCAP_SME_I16I32 __khwcap2_feature(SME_I16I32)
#define KERNEL_HWCAP_SME_BI32I32 __khwcap2_feature(SME_BI32I32)
#define KERNEL_HWCAP_SME_B16B16 __khwcap2_feature(SME_B16B16)
#define KERNEL_HWCAP_SME_F16F16 __khwcap2_feature(SME_F16F16)

/*
 * This yields a mask that user programs can use to figure out what
 * instruction set this cpu supports.
 */
#define ELF_HWCAP cpu_get_elf_hwcap()
#define ELF_HWCAP2 cpu_get_elf_hwcap2()
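
/*
 * Userspace side, as a sketch (assumes <sys/auxv.h> and the uapi
 * <asm/hwcap.h>): programs read these masks back through the ELF
 * auxiliary vector, e.g.:
 *
 *	#include <stdbool.h>
 *	#include <sys/auxv.h>
 *	#include <asm/hwcap.h>
 *
 *	static bool cpu_has_sve(void)
 *	{
 *		return !!(getauxval(AT_HWCAP) & HWCAP_SVE);
 *	}
 *
 *	static bool cpu_has_sve2(void)
 *	{
 *		return !!(getauxval(AT_HWCAP2) & HWCAP2_SVE2);
 *	}
 */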

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP (compat_elf_hwcap)
#define COMPAT_ELF_HWCAP2 (compat_elf_hwcap2)
extern unsigned int compat_elf_hwcap, compat_elf_hwcap2;
#endif
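
/*
 * Selectors for which hwcap word a detected capability should update:
 * the native ELF hwcaps or, for 32-bit tasks, one of the two compat
 * words (see the hwcap tables in arch/arm64/kernel/cpufeature.c).
 */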
enum {
	CAP_HWCAP = 1,
#ifdef CONFIG_COMPAT
	CAP_COMPAT_HWCAP,
	CAP_COMPAT_HWCAP2,
#endif
};

#endif /* __ASSEMBLY__ */
#endif /* __ASM_HWCAP_H */