Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-05-10 18:40:25 -04:00)
Merge branch 'for-next/cpufeature' into for-next/core

* for-next/cpufeature:
  arm64: Align boot cpucap handling with system cpucap handling
  arm64: Cleanup system cpucap handling
  arm64: Kconfig: drop KAISER reference from KPTI option description
  arm64: mm: Only map KPTI trampoline if it is going to be used
  arm64: Get rid of ARM64_HAS_NO_HW_PREFETCH
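Taken together, the branch collapses cpucap setup into a few clear entry points. A minimal sketch of the resulting boot flow, condensed from the hunks below (surrounding code elided; comments are mine):

    /* Boot CPU, early (smp_prepare_boot_cpu): */
    cpuinfo_store_boot_cpu();    /* record the boot CPU's feature registers */
    setup_boot_cpu_features();   /* detect boot/local cpucaps, enable the
                                  * SCOPE_BOOT_CPU ones, patch boot alternatives */

    /* Once all CPUs are online (smp_cpus_done): */
    hyp_mode_check();
    setup_system_features();     /* finalize system-wide cpucaps and patch
                                  * all remaining alternatives */
    setup_user_features();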
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
@@ -1549,7 +1549,7 @@ config ARCH_FORCE_MAX_ORDER
 	  Don't change if unsure.
 
 config UNMAP_KERNEL_AT_EL0
-	bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
+	bool "Unmap kernel when running in userspace (KPTI)" if EXPERT
 	default y
 	help
 	  Speculation attacks against some high-performance processors can
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
@@ -617,6 +617,7 @@ static inline bool id_aa64pfr1_mte(u64 pfr1)
 	return val >= ID_AA64PFR1_EL1_MTE_MTE2;
 }
 
+void __init setup_boot_cpu_features(void);
 void __init setup_system_features(void);
 void __init setup_user_features(void);
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
@@ -1081,25 +1081,6 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
 
 	if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
 		init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
-
-	/*
-	 * Initialize the indirect array of CPU capabilities pointers before we
-	 * handle the boot CPU below.
-	 */
-	init_cpucap_indirect_list();
-
-	/*
-	 * Detect broken pseudo-NMI. Must be called _before_ the call to
-	 * setup_boot_cpu_capabilities() since it interacts with
-	 * can_use_gic_priorities().
-	 */
-	detect_system_supports_pseudo_nmi();
-
-	/*
-	 * Detect and enable early CPU capabilities based on the boot CPU,
-	 * after we have initialised the CPU feature infrastructure.
-	 */
-	setup_boot_cpu_capabilities();
 }
 
 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
@@ -1584,16 +1565,6 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry,
 	return has_sre;
 }
 
-static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
-{
-	u32 midr = read_cpuid_id();
-
-	/* Cavium ThunderX pass 1.x and 2.x */
-	return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
-				       MIDR_CPU_VAR_REV(0, 0),
-				       MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
-}
-
 static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
 			  int scope)
 {
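For context on the matcher deleted above: MIDR_EL1 carries a variant (major pass) and revision (minor pass) for the silicon, and midr_is_cpu_model_range() from arch/arm64/include/asm/cputype.h does an inclusive compare on that (variant, revision) pair, so "pass 1.x and 2.x" becomes variants 0 and 1. A standalone sketch of the same check, with a hypothetical helper name:

    /* Sketch: matches Cavium ThunderX pass 1.x (variant 0) and 2.x (variant 1). */
    static bool cpu_is_early_thunderx(void)	/* hypothetical name */
    {
        u32 midr = read_cpuid_id();

        return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
                                       MIDR_CPU_VAR_REV(0, 0),
                                       MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
    }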
@@ -2321,12 +2292,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, ATOMIC, IMP)
 	},
 #endif /* CONFIG_ARM64_LSE_ATOMICS */
-	{
-		.desc = "Software prefetching using PRFM",
-		.capability = ARM64_HAS_NO_HW_PREFETCH,
-		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
-		.matches = has_no_hw_prefetch,
-	},
 	{
 		.desc = "Virtualization Host Extensions",
 		.capability = ARM64_HAS_VIRT_HOST_EXTN,
@@ -3271,14 +3236,6 @@ void check_local_cpu_capabilities(void)
 	verify_local_cpu_capabilities();
 }
 
-static void __init setup_boot_cpu_capabilities(void)
-{
-	/* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
-	update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
-	/* Enable the SCOPE_BOOT_CPU capabilities alone right away */
-	enable_cpu_capabilities(SCOPE_BOOT_CPU);
-}
-
 bool this_cpu_has_cap(unsigned int n)
 {
 	if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
@@ -3334,23 +3291,70 @@ unsigned long cpu_get_elf_hwcap2(void)
 	return elf_hwcap[1];
 }
 
-void __init setup_system_features(void)
+static void __init setup_boot_cpu_capabilities(void)
 {
-	int i;
-
 	/*
-	 * The system-wide safe feature feature register values have been
-	 * finalized. Finalize and log the available system capabilities.
+	 * The boot CPU's feature register values have been recorded. Detect
+	 * boot cpucaps and local cpucaps for the boot CPU, then enable and
+	 * patch alternatives for the available boot cpucaps.
 	 */
-	update_cpu_capabilities(SCOPE_SYSTEM);
-	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
-	    !cpus_have_cap(ARM64_HAS_PAN))
-		pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
+	update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
+	enable_cpu_capabilities(SCOPE_BOOT_CPU);
+	apply_boot_alternatives();
+}
+
+void __init setup_boot_cpu_features(void)
+{
+	/*
+	 * Initialize the indirect array of CPU capabilities pointers before we
+	 * handle the boot CPU.
+	 */
+	init_cpucap_indirect_list();
 
 	/*
-	 * Enable all the available capabilities which have not been enabled
-	 * already.
+	 * Detect broken pseudo-NMI. Must be called _before_ the call to
+	 * setup_boot_cpu_capabilities() since it interacts with
+	 * can_use_gic_priorities().
 	 */
+	detect_system_supports_pseudo_nmi();
+
+	setup_boot_cpu_capabilities();
+}
+
+static void __init setup_system_capabilities(void)
+{
+	/*
+	 * The system-wide safe feature register values have been finalized.
+	 * Detect, enable, and patch alternatives for the available system
+	 * cpucaps.
+	 */
+	update_cpu_capabilities(SCOPE_SYSTEM);
 	enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
+	apply_alternatives_all();
+
+	/*
+	 * Log any cpucaps with a cpumask as these aren't logged by
+	 * update_cpu_capabilities().
+	 */
+	for (int i = 0; i < ARM64_NCAPS; i++) {
+		const struct arm64_cpu_capabilities *caps = cpucap_ptrs[i];
+
+		if (caps && caps->cpus && caps->desc &&
+		    cpumask_any(caps->cpus) < nr_cpu_ids)
+			pr_info("detected: %s on CPU%*pbl\n",
+				caps->desc, cpumask_pr_args(caps->cpus));
+	}
+
+	/*
+	 * TTBR0 PAN doesn't have its own cpucap, so log it manually.
+	 */
+	if (system_uses_ttbr0_pan())
+		pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
+}
+
+void __init setup_system_features(void)
+{
+	setup_system_capabilities();
 
 	kpti_install_ng_mappings();
@@ -3363,15 +3367,6 @@ void __init setup_system_features(void)
 	if (!cache_type_cwg())
 		pr_warn("No Cache Writeback Granule information, assuming %d\n",
 			ARCH_DMA_MINALIGN);
-
-	for (i = 0; i < ARM64_NCAPS; i++) {
-		const struct arm64_cpu_capabilities *caps = cpucap_ptrs[i];
-
-		if (caps && caps->cpus && caps->desc &&
-		    cpumask_any(caps->cpus) < nr_cpu_ids)
-			pr_info("detected: %s on CPU%*pbl\n",
-				caps->desc, cpumask_pr_args(caps->cpus));
-	}
 }
 
 void __init setup_user_features(void)
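Once setup_system_features() returns, the system-wide cpucap set is frozen. A hedged example of a typical later consumer (cpus_have_final_cap() is the existing arm64 API; the caller and message are made up):

    /* Hypothetical caller: after finalization the test compiles down to a
     * patched branch rather than a bitmap lookup. */
    static void report_lse(void)
    {
        if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
            pr_debug("LSE atomics usable system-wide\n");
    }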
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
@@ -1171,7 +1171,7 @@ void __init sve_setup(void)
 	unsigned long b;
 	int max_bit;
 
-	if (!cpus_have_cap(ARM64_SVE))
+	if (!system_supports_sve())
 		return;
 
 	/*
@@ -1301,7 +1301,7 @@ void __init sme_setup(void)
 	struct vl_info *info = &vl_info[ARM64_VEC_SME];
 	int min_bit, max_bit;
 
-	if (!cpus_have_cap(ARM64_SME))
+	if (!system_supports_sme())
 		return;
 
 	/*
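The switch from cpus_have_cap() to system_supports_sve()/system_supports_sme() is safe because alternatives are now patched before sve_setup()/sme_setup() run. Roughly, and only as a sketch (the real helpers live in arch/arm64/include/asm/cpufeature.h), each helper folds the Kconfig gate into the cpucap test:

    /* Sketch only, not the exact kernel definition. */
    static inline bool sketch_system_supports_sve(void)
    {
        return IS_ENABLED(CONFIG_ARM64_SVE) && cpus_have_cap(ARM64_SVE);
    }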
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
@@ -439,9 +439,8 @@ static void __init hyp_mode_check(void)
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
-	setup_system_features();
 	hyp_mode_check();
-	apply_alternatives_all();
+	setup_system_features();
 	setup_user_features();
 	mark_linear_text_alias_ro();
 }
@@ -454,14 +453,9 @@ void __init smp_prepare_boot_cpu(void)
 	 * freed shortly, so we must move over to the runtime per-cpu area.
 	 */
 	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-	cpuinfo_store_boot_cpu();
 
-	/*
-	 * We now know enough about the boot CPU to apply the
-	 * alternatives that cannot wait until interrupt handling
-	 * and/or scheduling is enabled.
-	 */
-	apply_boot_alternatives();
+	cpuinfo_store_boot_cpu();
+	setup_boot_cpu_features();
 
 	/* Conditionally switch to GIC PMR for interrupt masking */
 	if (system_uses_irq_prio_masking())
diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S
@@ -18,13 +18,6 @@
  * x1 - src
  */
 SYM_FUNC_START(__pi_copy_page)
-alternative_if ARM64_HAS_NO_HW_PREFETCH
-	// Prefetch three cache lines ahead.
-	prfm	pldl1strm, [x1, #128]
-	prfm	pldl1strm, [x1, #256]
-	prfm	pldl1strm, [x1, #384]
-alternative_else_nop_endif
-
 	ldp	x2, x3, [x1]
 	ldp	x4, x5, [x1, #16]
 	ldp	x6, x7, [x1, #32]
@@ -39,10 +32,6 @@ alternative_else_nop_endif
 1:
 	tst	x0, #(PAGE_SIZE - 1)
 
-alternative_if ARM64_HAS_NO_HW_PREFETCH
-	prfm	pldl1strm, [x1, #384]
-alternative_else_nop_endif
-
 	stnp	x2, x3, [x0, #-256]
 	ldp	x2, x3, [x1]
 	stnp	x4, x5, [x0, #16 - 256]
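The alternative_if blocks being deleted are boot-time patched: when the named cpucap is not detected, the guarded instructions become NOPs (hence alternative_else_nop_endif). The same mechanism is reachable from C through the ALTERNATIVE() macro; an illustration under assumptions (the instructions and cpucap here are arbitrary examples, not taken from this commit):

    /* Illustration: the first sequence runs by default and is patched to
     * the second at boot when the cpucap was detected. */
    static inline void example_patched_insn(void)	/* hypothetical */
    {
        asm volatile(ALTERNATIVE("nop", "sevl", ARM64_HAS_WFXT));
    }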
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
@@ -674,6 +674,9 @@ static int __init map_entry_trampoline(void)
 {
 	int i;
 
+	if (!arm64_kernel_unmapped_at_el0())
+		return 0;
+
 	pgprot_t prot = kernel_exec_prot();
 	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
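With the added early return, the trampoline is only mapped when KPTI is actually in use. arm64_kernel_unmapped_at_el0() (arch/arm64/include/asm/mmu.h) reports whether the ARM64_UNMAP_KERNEL_AT_EL0 cpucap was detected during early boot; roughly, as a sketch rather than the exact definition:

    /* Sketch only: gate on the Kconfig option plus the cpucap settled
     * during early boot. */
    static inline bool sketch_kernel_unmapped_at_el0(void)
    {
        return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
               cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0);
    }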
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
@@ -40,7 +40,6 @@ HAS_LDAPR
 HAS_LSE_ATOMICS
 HAS_MOPS
 HAS_NESTED_VIRT
-HAS_NO_HW_PREFETCH
 HAS_PAN
 HAS_S1PIE
 HAS_RAS_EXTN