mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-12-27 12:21:22 -05:00)
Merge tag 'riscv-for-linus-6.18-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V fixes from Paul Walmsley:

 - Close a race during boot between userspace vDSO usage and some
   late-initialized vDSO data

 - Improve performance on systems with non-CPU-cache-coherent
   DMA-capable peripherals by enabling write combining on
   pgprot_dmacoherent() allocations

 - Add human-readable detail for RISC-V IPI tracing

 - Provide more information to zsmalloc on 64-bit RISC-V to improve
   allocation

 - Silence useless boot messages about CPUs that have been disabled in DT

 - Resolve some compiler and smatch warnings and remove a redundant macro

* tag 'riscv-for-linus-6.18-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: hwprobe: avoid uninitialized variable use in hwprobe_arch_id()
  riscv: cpufeature: avoid uninitialized variable in has_thead_homogeneous_vlenb()
  riscv: hwprobe: Fix stale vDSO data for late-initialized keys at boot
  riscv: add a forward declaration for cpuinfo_op
  RISC-V: Don't print details of CPUs disabled in DT
  riscv: Remove the PER_CPU_OFFSET_SHIFT macro
  riscv: mm: Define MAX_POSSIBLE_PHYSMEM_BITS for zsmalloc
  riscv: Register IPI IRQs with unique names
  ACPI: RIMT: Fix unused function warnings when CONFIG_IOMMU_API is disabled
  RISC-V: Define pgprot_dmacoherent() for non-coherent devices
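The headline fix closes a window where early userspace could read hwprobe answers from the vDSO before deferred probing had filled them in; the new avd->ready gate (see the sys_hwprobe.c and vdso/hwprobe.c hunks below) makes the vDSO fall back to the syscall until probing completes. For readers unfamiliar with the interface being fixed, here is an illustrative userspace sketch, not part of the patch: it queries the same ID keys that hwprobe_arch_id() serves, via the raw syscall, and assumes a riscv64 Linux system whose headers provide __NR_riscv_hwprobe and <asm/hwprobe.h>. Note that a raw syscall(2) always traps into the kernel; the vDSO fast path is only taken by wrappers that go through the __vdso_riscv_hwprobe symbol (such as glibc's __riscv_hwprobe).

/* Illustrative only: query M-mode ID registers via riscv_hwprobe(2). */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>  /* syscall(), __NR_riscv_hwprobe (riscv64 only) */
#include <asm/hwprobe.h>  /* struct riscv_hwprobe, RISCV_HWPROBE_KEY_* */

int main(void)
{
        struct riscv_hwprobe pairs[] = {
                { .key = RISCV_HWPROBE_KEY_MVENDORID },
                { .key = RISCV_HWPROBE_KEY_MARCHID },
                { .key = RISCV_HWPROBE_KEY_MIMPID },
        };

        /* cpusetsize == 0 and cpus == NULL ask about all online harts. */
        if (syscall(__NR_riscv_hwprobe, pairs, 3UL, 0UL, NULL, 0U)) {
                perror("riscv_hwprobe");
                return 1;
        }

        for (int i = 0; i < 3; i++)
                printf("key %lld -> 0x%llx\n",
                       (long long)pairs[i].key,
                       (unsigned long long)pairs[i].value);
        return 0;
}

A key the kernel does not recognize comes back with its key field cleared to -1, which is why the vDSO data caches per-key values plus an all-CPUs fast-path flag.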
arch/riscv/include/asm/asm.h
@@ -84,15 +84,9 @@
 .endm
 
 #ifdef CONFIG_SMP
-#ifdef CONFIG_32BIT
-#define PER_CPU_OFFSET_SHIFT 2
-#else
-#define PER_CPU_OFFSET_SHIFT 3
-#endif
-
 .macro asm_per_cpu dst sym tmp
         lw \tmp, TASK_TI_CPU_NUM(tp)
-        slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
+        slli \tmp, \tmp, RISCV_LGPTR
         la \dst, __per_cpu_offset
         add \dst, \dst, \tmp
         REG_L \tmp, 0(\dst)
arch/riscv/include/asm/cpufeature.h
@@ -31,6 +31,8 @@ struct riscv_isainfo {
 
 DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
 
+extern const struct seq_operations cpuinfo_op;
+
 /* Per-cpu ISA extensions. */
 extern struct riscv_isainfo hart_isa[NR_CPUS];
 
arch/riscv/include/asm/hwprobe.h
@@ -42,4 +42,11 @@ static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair,
         return pair->value == other_pair->value;
 }
 
+#ifdef CONFIG_MMU
+void riscv_hwprobe_register_async_probe(void);
+void riscv_hwprobe_complete_async_probe(void);
+#else
+static inline void riscv_hwprobe_register_async_probe(void) {}
+static inline void riscv_hwprobe_complete_async_probe(void) {}
+#endif
 #endif
arch/riscv/include/asm/pgtable-64.h
@@ -69,6 +69,8 @@ typedef struct {
 
 #define PTRS_PER_PMD    (PAGE_SIZE / sizeof(pmd_t))
 
+#define MAX_POSSIBLE_PHYSMEM_BITS 56
+
 /*
  * rv64 PTE format:
  * | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
arch/riscv/include/asm/pgtable.h
@@ -654,6 +654,8 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
         return __pgprot(prot);
 }
 
+#define pgprot_dmacoherent pgprot_writecombine
+
 /*
  * Both Svade and Svadu control the hardware behavior when the PTE A/D bits need to be set. By
  * default the M-mode firmware enables the hardware updating scheme when only Svadu is present in
arch/riscv/include/asm/vdso/arch_data.h
@@ -12,6 +12,12 @@ struct vdso_arch_data {
 
         /* Boolean indicating all CPUs have the same static hwprobe values. */
         __u8 homogeneous_cpus;
+
+        /*
+         * A gate to check and see if the hwprobe data is actually ready, as
+         * probing is deferred to avoid boot slowdowns.
+         */
+        __u8 ready;
 };
 
 #endif /* __RISCV_ASM_VDSO_ARCH_DATA_H */
arch/riscv/kernel/cpu.c
@@ -62,10 +62,8 @@ int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned lo
                 return -ENODEV;
         }
 
-        if (!of_device_is_available(node)) {
-                pr_info("CPU with hartid=%lu is not available\n", *hart);
+        if (!of_device_is_available(node))
                 return -ENODEV;
-        }
 
         if (of_property_read_string(node, "riscv,isa-base", &isa))
                 goto old_interface;
arch/riscv/kernel/cpufeature.c
@@ -932,9 +932,9 @@ static int has_thead_homogeneous_vlenb(void)
 {
         int cpu;
         u32 prev_vlenb = 0;
-        u32 vlenb;
+        u32 vlenb = 0;
 
-        /* Ignore thead,vlenb property if xtheavector is not enabled in the kernel */
+        /* Ignore thead,vlenb property if xtheadvector is not enabled in the kernel */
         if (!IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
                 return 0;
 
arch/riscv/kernel/smp.c
@@ -40,6 +40,17 @@ enum ipi_message_type {
         IPI_MAX
 };
 
+static const char * const ipi_names[] = {
+        [IPI_RESCHEDULE]        = "Rescheduling interrupts",
+        [IPI_CALL_FUNC]         = "Function call interrupts",
+        [IPI_CPU_STOP]          = "CPU stop interrupts",
+        [IPI_CPU_CRASH_STOP]    = "CPU stop (for crash dump) interrupts",
+        [IPI_IRQ_WORK]          = "IRQ work interrupts",
+        [IPI_TIMER]             = "Timer broadcast interrupts",
+        [IPI_CPU_BACKTRACE]     = "CPU backtrace interrupts",
+        [IPI_KGDB_ROUNDUP]      = "KGDB roundup interrupts",
+};
+
 unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
         [0 ... NR_CPUS-1] = INVALID_HARTID
 };
@@ -199,7 +210,7 @@ void riscv_ipi_set_virq_range(int virq, int nr)
         /* Request IPIs */
         for (i = 0; i < nr_ipi; i++) {
                 err = request_percpu_irq(ipi_virq_base + i, handle_IPI,
-                                         "IPI", &ipi_dummy_dev);
+                                         ipi_names[i], &ipi_dummy_dev);
                 WARN_ON(err);
 
                 ipi_desc[i] = irq_to_desc(ipi_virq_base + i);
@@ -210,17 +221,6 @@ void riscv_ipi_set_virq_range(int virq, int nr)
         riscv_ipi_enable();
 }
 
-static const char * const ipi_names[] = {
-        [IPI_RESCHEDULE]        = "Rescheduling interrupts",
-        [IPI_CALL_FUNC]         = "Function call interrupts",
-        [IPI_CPU_STOP]          = "CPU stop interrupts",
-        [IPI_CPU_CRASH_STOP]    = "CPU stop (for crash dump) interrupts",
-        [IPI_IRQ_WORK]          = "IRQ work interrupts",
-        [IPI_TIMER]             = "Timer broadcast interrupts",
-        [IPI_CPU_BACKTRACE]     = "CPU backtrace interrupts",
-        [IPI_KGDB_ROUNDUP]      = "KGDB roundup interrupts",
-};
-
 void show_ipi_stats(struct seq_file *p, int prec)
 {
         unsigned int cpu, i;
arch/riscv/kernel/sys_hwprobe.c
@@ -5,6 +5,9 @@
  * more details.
  */
 #include <linux/syscalls.h>
+#include <linux/completion.h>
+#include <linux/atomic.h>
+#include <linux/once.h>
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
 #include <asm/hwprobe.h>
@@ -28,6 +31,11 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
         bool first = true;
         int cpu;
 
+        if (pair->key != RISCV_HWPROBE_KEY_MVENDORID &&
+            pair->key != RISCV_HWPROBE_KEY_MIMPID &&
+            pair->key != RISCV_HWPROBE_KEY_MARCHID)
+                goto out;
+
         for_each_cpu(cpu, cpus) {
                 u64 cpu_id;
 
@@ -58,6 +66,7 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
                 }
         }
 
+out:
         pair->value = id;
 }
 
@@ -454,28 +463,32 @@
         return 0;
 }
 
-static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
-                            size_t pair_count, size_t cpusetsize,
-                            unsigned long __user *cpus_user,
-                            unsigned int flags)
-{
-        if (flags & RISCV_HWPROBE_WHICH_CPUS)
-                return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
-                                        cpus_user, flags);
-
-        return hwprobe_get_values(pairs, pair_count, cpusetsize,
-                                  cpus_user, flags);
-}
-
 #ifdef CONFIG_MMU
 
-static int __init init_hwprobe_vdso_data(void)
+static DECLARE_COMPLETION(boot_probes_done);
+static atomic_t pending_boot_probes = ATOMIC_INIT(1);
+
+void riscv_hwprobe_register_async_probe(void)
+{
+        atomic_inc(&pending_boot_probes);
+}
+
+void riscv_hwprobe_complete_async_probe(void)
+{
+        if (atomic_dec_and_test(&pending_boot_probes))
+                complete(&boot_probes_done);
+}
+
+static int complete_hwprobe_vdso_data(void)
 {
         struct vdso_arch_data *avd = vdso_k_arch_data;
         u64 id_bitsmash = 0;
         struct riscv_hwprobe pair;
         int key;
 
+        if (unlikely(!atomic_dec_and_test(&pending_boot_probes)))
+                wait_for_completion(&boot_probes_done);
+
         /*
          * Initialize vDSO data with the answers for the "all CPUs" case, to
          * save a syscall in the common case.
@@ -503,13 +516,52 @@ static int __init init_hwprobe_vdso_data(void)
          * vDSO should defer to the kernel for exotic cpu masks.
          */
         avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
+
+        /*
+         * Make sure all the VDSO values are visible before we look at them.
+         * This pairs with the implicit "no speculativly visible accesses"
+         * barrier in the VDSO hwprobe code.
+         */
+        smp_wmb();
+        avd->ready = true;
+        return 0;
+}
+
+static int __init init_hwprobe_vdso_data(void)
+{
+        struct vdso_arch_data *avd = vdso_k_arch_data;
+
+        /*
+         * Prevent the vDSO cached values from being used, as they're not ready
+         * yet.
+         */
+        avd->ready = false;
         return 0;
 }
 
 arch_initcall_sync(init_hwprobe_vdso_data);
 
+#else
+
+static int complete_hwprobe_vdso_data(void) { return 0; }
+
 #endif /* CONFIG_MMU */
 
+static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
+                            size_t pair_count, size_t cpusetsize,
+                            unsigned long __user *cpus_user,
+                            unsigned int flags)
+{
+        DO_ONCE_SLEEPABLE(complete_hwprobe_vdso_data);
+
+        if (flags & RISCV_HWPROBE_WHICH_CPUS)
+                return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
+                                        cpus_user, flags);
+
+        return hwprobe_get_values(pairs, pair_count, cpusetsize,
+                                  cpus_user, flags);
+}
+
 SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
                 size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
                 cpus, unsigned int, flags)
arch/riscv/kernel/unaligned_access_speed.c
@@ -379,6 +379,7 @@ static void check_vector_unaligned_access(struct work_struct *work __always_unus
 static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
 {
         schedule_on_each_cpu(check_vector_unaligned_access);
+        riscv_hwprobe_complete_async_probe();
 
         return 0;
 }
@@ -473,8 +474,12 @@ static int __init check_unaligned_access_all_cpus(void)
                         per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
         } else if (!check_vector_unaligned_access_emulated_all_cpus() &&
                    IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
-                kthread_run(vec_check_unaligned_access_speed_all_cpus,
-                            NULL, "vec_check_unaligned_access_speed_all_cpus");
+                riscv_hwprobe_register_async_probe();
+                if (IS_ERR(kthread_run(vec_check_unaligned_access_speed_all_cpus,
+                                       NULL, "vec_check_unaligned_access_speed_all_cpus"))) {
+                        pr_warn("Failed to create vec_unalign_check kthread\n");
+                        riscv_hwprobe_complete_async_probe();
+                }
         }
 
         /*
arch/riscv/kernel/vdso/hwprobe.c
@@ -27,7 +27,7 @@ static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count,
          * homogeneous, then this function can handle requests for arbitrary
          * masks.
          */
-        if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus))
+        if (flags != 0 || (!all_cpus && !avd->homogeneous_cpus) || unlikely(!avd->ready))
                 return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);
 
         /* This is something we can handle, fill out the pairs. */
drivers/acpi/riscv/rimt.c
@@ -61,30 +61,6 @@ static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
         return 0;
 }
 
-/**
- * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
- *
- * @node: RIMT table node to be looked-up
- *
- * Returns: fwnode_handle pointer on success, NULL on failure
- */
-static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
-{
-        struct fwnode_handle *fwnode = NULL;
-        struct rimt_fwnode *curr;
-
-        spin_lock(&rimt_fwnode_lock);
-        list_for_each_entry(curr, &rimt_fwnode_list, list) {
-                if (curr->rimt_node == node) {
-                        fwnode = curr->fwnode;
-                        break;
-                }
-        }
-        spin_unlock(&rimt_fwnode_lock);
-
-        return fwnode;
-}
-
 static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
                                             void *context)
 {
@@ -202,6 +178,67 @@ static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
         return NULL;
 }
 
+/*
+ * RISC-V supports IOMMU as a PCI device or a platform device.
+ * When it is a platform device, there should be a namespace device as
+ * well along with RIMT. To create the link between RIMT information and
+ * the platform device, the IOMMU driver should register itself with the
+ * RIMT module. This is true for PCI based IOMMU as well.
+ */
+int rimt_iommu_register(struct device *dev)
+{
+        struct fwnode_handle *rimt_fwnode;
+        struct acpi_rimt_node *node;
+
+        node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
+        if (!node) {
+                pr_err("Could not find IOMMU node in RIMT\n");
+                return -ENODEV;
+        }
+
+        if (dev_is_pci(dev)) {
+                rimt_fwnode = acpi_alloc_fwnode_static();
+                if (!rimt_fwnode)
+                        return -ENOMEM;
+
+                rimt_fwnode->dev = dev;
+                if (!dev->fwnode)
+                        dev->fwnode = rimt_fwnode;
+
+                rimt_set_fwnode(node, rimt_fwnode);
+        } else {
+                rimt_set_fwnode(node, dev->fwnode);
+        }
+
+        return 0;
+}
+
+#ifdef CONFIG_IOMMU_API
+
+/**
+ * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
+ *
+ * @node: RIMT table node to be looked-up
+ *
+ * Returns: fwnode_handle pointer on success, NULL on failure
+ */
+static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
+{
+        struct fwnode_handle *fwnode = NULL;
+        struct rimt_fwnode *curr;
+
+        spin_lock(&rimt_fwnode_lock);
+        list_for_each_entry(curr, &rimt_fwnode_list, list) {
+                if (curr->rimt_node == node) {
+                        fwnode = curr->fwnode;
+                        break;
+                }
+        }
+        spin_unlock(&rimt_fwnode_lock);
+
+        return fwnode;
+}
+
 static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
 {
         struct acpi_rimt_pcie_rc *pci_rc;
@@ -290,43 +327,6 @@ static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
         return NULL;
 }
 
-/*
- * RISC-V supports IOMMU as a PCI device or a platform device.
- * When it is a platform device, there should be a namespace device as
- * well along with RIMT. To create the link between RIMT information and
- * the platform device, the IOMMU driver should register itself with the
- * RIMT module. This is true for PCI based IOMMU as well.
- */
-int rimt_iommu_register(struct device *dev)
-{
-        struct fwnode_handle *rimt_fwnode;
-        struct acpi_rimt_node *node;
-
-        node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
-        if (!node) {
-                pr_err("Could not find IOMMU node in RIMT\n");
-                return -ENODEV;
-        }
-
-        if (dev_is_pci(dev)) {
-                rimt_fwnode = acpi_alloc_fwnode_static();
-                if (!rimt_fwnode)
-                        return -ENOMEM;
-
-                rimt_fwnode->dev = dev;
-                if (!dev->fwnode)
-                        dev->fwnode = rimt_fwnode;
-
-                rimt_set_fwnode(node, rimt_fwnode);
-        } else {
-                rimt_set_fwnode(node, dev->fwnode);
-        }
-
-        return 0;
-}
-
-#ifdef CONFIG_IOMMU_API
-
 static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
                                                u32 id_in, u32 *id_out,
                                                u8 type_mask)