mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Merge branch 'for-next/misc' into for-next/core
* for-next/misc:
  arm64/mm: use GENMASK_ULL for TTBR_BADDR_MASK_52
  arm64: numa: Don't check node against MAX_NUMNODES
  arm64: mm: Remove assembly DMA cache maintenance wrappers
  arm64/mm: Define defer_reserve_crashkernel()
  arm64: fix oops in concurrently setting insn_emulation sysctls
  arm64: Do not forget syscall when starting a new thread.
  arm64: boot: add zstd support

arch/arm64/boot/Makefile
@@ -16,7 +16,7 @@
 OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
 
-targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo
+targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo Image.zst
 
 $(obj)/Image: vmlinux FORCE
 	$(call if_changed,objcopy)

@@ -35,3 +35,6 @@ $(obj)/Image.lzma: $(obj)/Image FORCE
 
 $(obj)/Image.lzo: $(obj)/Image FORCE
 	$(call if_changed,lzo)
+
+$(obj)/Image.zst: $(obj)/Image FORCE
+	$(call if_changed,zstd)

arch/arm64/include/asm/cacheflush.h
@@ -104,13 +104,6 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
 }
 #define flush_icache_range flush_icache_range
 
-/*
- * Cache maintenance functions used by the DMA API. No to be used directly.
- */
-extern void __dma_map_area(const void *, size_t, int);
-extern void __dma_unmap_area(const void *, size_t, int);
-extern void __dma_flush_area(const void *, size_t);
-
 /*
  * Copy user data from/to a page which is mapped into a different
  * processes address space. Really, we want to allow our "user

arch/arm64/include/asm/memory.h
@@ -351,6 +351,11 @@ static inline void *phys_to_virt(phys_addr_t x)
 })
 
 void dump_mem_limit(void);
+
+static inline bool defer_reserve_crashkernel(void)
+{
+	return IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32);
+}
 #endif /* !ASSEMBLY */
 
 /*
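
The new helper leans entirely on IS_ENABLED(), so it folds to a compile-time constant. As a rough illustration, the sketch below reuses the kconfig preprocessor trick in a standalone userspace program; the simplified IS_ENABLED() (built-in only, no "=m" handling) and the main() harness are assumptions for the demo, not kernel code.

/* Standalone sketch: a simplified IS_ENABLED() so defer_reserve_crashkernel()
 * can be compiled and exercised in userspace. Options that are "y" must be
 * passed as -DCONFIG_FOO=1 on the compiler command line. */
#include <stdbool.h>
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option) __is_defined(option)	/* simplified: ignores =m */

static inline bool defer_reserve_crashkernel(void)
{
	return IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32);
}

int main(void)
{
	/* With neither option defined, both expansions yield 0 and the
	 * crashkernel reservation is not deferred. */
	printf("defer: %d\n", defer_reserve_crashkernel());
	return 0;
}

Building the sketch with -DCONFIG_ZONE_DMA=1 flips the result to 1, mirroring a kernel configured with ZONE_DMA.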

arch/arm64/include/asm/pgtable-hwdef.h
@@ -281,10 +281,9 @@
  */
 #ifdef CONFIG_ARM64_PA_BITS_52
 /*
- * This should be GENMASK_ULL(47, 2).
  * TTBR_ELx[1] is RES0 in this configuration.
  */
-#define TTBR_BADDR_MASK_52	(((UL(1) << 46) - 1) << 2)
+#define TTBR_BADDR_MASK_52	GENMASK_ULL(47, 2)
 #endif
 
 #ifdef CONFIG_ARM64_VA_BITS_52
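
The old and new definitions describe the same bit field, bits [47:2] of TTBR_ELx. A quick standalone check confirms the two expressions are bit-for-bit identical; the simplified GENMASK_ULL here (valid for 0 <= l <= h < 64) is an assumption for the demo, not the kernel's exact macro.

/* Standalone check that GENMASK_ULL(47, 2) equals the hand-built mask. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) ((~0ULL >> (63 - (h))) & (~0ULL << (l)))

int main(void)
{
	uint64_t old_mask = ((1ULL << 46) - 1) << 2;	/* 46 ones shifted up to bits [47:2] */
	uint64_t new_mask = GENMASK_ULL(47, 2);		/* same field, stated by its bounds */

	assert(old_mask == new_mask);
	printf("TTBR_BADDR_MASK_52 = 0x%016llx\n", (unsigned long long)new_mask);
	return 0;
}

Both forms evaluate to 0x0000fffffffffffc; the GENMASK_ULL spelling simply states the field bounds instead of encoding them as a shifted 46-bit all-ones value.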

arch/arm64/include/asm/processor.h
@@ -272,8 +272,9 @@ void tls_preserve_current_state(void);
 
 static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
 {
+	s32 previous_syscall = regs->syscallno;
 	memset(regs, 0, sizeof(*regs));
-	forget_syscall(regs);
+	regs->syscallno = previous_syscall;
 	regs->pc = pc;
 
 	if (system_uses_irq_prio_masking())
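
The memset() wipes the whole pt_regs, including the number of the syscall (execve) that started the thread, which tracers can still legitimately ask about. A minimal userspace sketch of the save/restore pattern follows; the fake_regs struct and the 221 constant are illustrative stand-ins, not the real pt_regs layout.

/* Sketch: save the syscall number before the wipe, restore it afterwards. */
#include <assert.h>
#include <string.h>

struct fake_regs {
	int syscallno;
	unsigned long pc;
};

static void start_thread_common_like(struct fake_regs *regs, unsigned long pc)
{
	int previous_syscall = regs->syscallno;	/* save before the wipe */

	memset(regs, 0, sizeof(*regs));
	regs->syscallno = previous_syscall;	/* restore what execve entered with */
	regs->pc = pc;
}

int main(void)
{
	struct fake_regs regs = { .syscallno = 221 /* e.g. execve on arm64 */ };

	start_thread_common_like(&regs, 0x400000);
	assert(regs.syscallno == 221);		/* the syscall is not forgotten */
	return 0;
}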

arch/arm64/kernel/acpi_numa.c
@@ -109,7 +109,7 @@ void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa)
 	pxm = pa->proximity_domain;
 	node = acpi_map_pxm_to_node(pxm);
 
-	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
+	if (node == NUMA_NO_NODE) {
 		pr_err("SRAT: Too many proximity domains %d\n", pxm);
 		bad_srat();
 		return;

arch/arm64/kernel/armv8_deprecated.c
@@ -59,6 +59,7 @@ struct insn_emulation {
 static LIST_HEAD(insn_emulation);
 static int nr_insn_emulated __initdata;
 static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
+static DEFINE_MUTEX(insn_emulation_mutex);
 
 static void register_emulation_hooks(struct insn_emulation_ops *ops)
 {

@@ -207,10 +208,10 @@ static int emulation_proc_handler(struct ctl_table *table, int write,
 				  loff_t *ppos)
 {
 	int ret = 0;
-	struct insn_emulation *insn = (struct insn_emulation *) table->data;
+	struct insn_emulation *insn = container_of(table->data, struct insn_emulation, current_mode);
 	enum insn_emulation_mode prev_mode = insn->current_mode;
 
-	table->data = &insn->current_mode;
+	mutex_lock(&insn_emulation_mutex);
 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 
 	if (ret || !write || prev_mode == insn->current_mode)
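
Recovering the insn_emulation from a pointer to its current_mode member is what lets the handler stop rewriting table->data, the shared-state write that raced between concurrent sysctl writers. Below is a standalone sketch of that idea with a simplified struct and the standard container_of() definition; names ending in _like are illustrative, not kernel types.

/* Sketch: recover the enclosing struct from a pointer to one of its members. */
#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct insn_emulation_like {
	const char *name;
	int current_mode;
};

int main(void)
{
	struct insn_emulation_like insn = { .name = "swp", .current_mode = 1 };
	void *data = &insn.current_mode;	/* what sysctl->data now points at */

	struct insn_emulation_like *back =
		container_of(data, struct insn_emulation_like, current_mode);

	assert(back == &insn);			/* parent recovered; no table->data swap needed */
	return 0;
}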

@@ -223,7 +224,7 @@ static int emulation_proc_handler(struct ctl_table *table, int write,
 		update_insn_emulation_mode(insn, INSN_UNDEF);
 	}
 ret:
-	table->data = insn;
+	mutex_unlock(&insn_emulation_mutex);
 	return ret;
 }

@@ -247,7 +248,7 @@ static void __init register_insn_emulation_sysctl(void)
 		sysctl->maxlen = sizeof(int);
 
 		sysctl->procname = insn->ops->name;
-		sysctl->data = insn;
+		sysctl->data = &insn->current_mode;
 		sysctl->extra1 = &insn->min;
 		sysctl->extra2 = &insn->max;
 		sysctl->proc_handler = emulation_proc_handler;

arch/arm64/mm/cache.S
@@ -194,44 +194,3 @@ SYM_FUNC_START(__pi_dcache_clean_pop)
 	ret
 SYM_FUNC_END(__pi_dcache_clean_pop)
 SYM_FUNC_ALIAS(dcache_clean_pop, __pi_dcache_clean_pop)
-
-/*
- *	__dma_flush_area(start, size)
- *
- *	clean & invalidate D / U line
- *
- *	- start   - virtual start address of region
- *	- size    - size in question
- */
-SYM_FUNC_START(__pi___dma_flush_area)
-	add	x1, x0, x1
-	dcache_by_line_op civac, sy, x0, x1, x2, x3
-	ret
-SYM_FUNC_END(__pi___dma_flush_area)
-SYM_FUNC_ALIAS(__dma_flush_area, __pi___dma_flush_area)
-
-/*
- *	__dma_map_area(start, size, dir)
- *	- start	- kernel virtual start address
- *	- size	- size of region
- *	- dir	- DMA direction
- */
-SYM_FUNC_START(__pi___dma_map_area)
-	add	x1, x0, x1
-	b	__pi_dcache_clean_poc
-SYM_FUNC_END(__pi___dma_map_area)
-SYM_FUNC_ALIAS(__dma_map_area, __pi___dma_map_area)
-
-/*
- *	__dma_unmap_area(start, size, dir)
- *	- start	- kernel virtual start address
- *	- size	- size of region
- *	- dir	- DMA direction
- */
-SYM_FUNC_START(__pi___dma_unmap_area)
-	add	x1, x0, x1
-	cmp	w2, #DMA_TO_DEVICE
-	b.ne	__pi_dcache_inval_poc
-	ret
-SYM_FUNC_END(__pi___dma_unmap_area)
-SYM_FUNC_ALIAS(__dma_unmap_area, __pi___dma_unmap_area)

arch/arm64/mm/dma-mapping.c
@@ -14,20 +14,29 @@
 #include <asm/xen/xen-ops.h>
 
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
+			      enum dma_data_direction dir)
 {
-	__dma_map_area(phys_to_virt(paddr), size, dir);
+	unsigned long start = (unsigned long)phys_to_virt(paddr);
+
+	dcache_clean_poc(start, start + size);
 }
 
 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
+			   enum dma_data_direction dir)
 {
-	__dma_unmap_area(phys_to_virt(paddr), size, dir);
+	unsigned long start = (unsigned long)phys_to_virt(paddr);
+
+	if (dir == DMA_TO_DEVICE)
+		return;
+
+	dcache_inval_poc(start, start + size);
 }
 
 void arch_dma_prep_coherent(struct page *page, size_t size)
 {
-	__dma_flush_area(page_address(page), size);
+	unsigned long start = (unsigned long)page_address(page);
+
+	dcache_clean_inval_poc(start, start + size);
 }
 
 #ifdef CONFIG_IOMMU_DMA
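
With the assembly wrappers gone, the direction-to-cache-op policy is spelled out directly in C: clean to the point of coherency before the device reads a buffer, invalidate after the device may have written it, and do nothing on the CPU side for DMA_TO_DEVICE. The userspace sketch below restates that policy with stubbed cache ops; all demo_ names are hypothetical stand-ins for the kernel helpers.

/* Sketch of the DMA cache-maintenance policy, with printing stubs. */
#include <stddef.h>
#include <stdio.h>

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

static void demo_dcache_clean_poc(unsigned long s, unsigned long e)
{
	printf("clean      [%#lx, %#lx)\n", s, e);
}

static void demo_dcache_inval_poc(unsigned long s, unsigned long e)
{
	printf("invalidate [%#lx, %#lx)\n", s, e);
}

/* Mirrors arch_sync_dma_for_device(): CPU writes must reach memory
 * before the device looks at the buffer. */
static void demo_sync_for_device(unsigned long start, size_t size)
{
	demo_dcache_clean_poc(start, start + size);
}

/* Mirrors arch_sync_dma_for_cpu(): stale lines must go, unless the
 * device never wrote the buffer at all. */
static void demo_sync_for_cpu(unsigned long start, size_t size,
			      enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE)
		return;
	demo_dcache_inval_poc(start, start + size);
}

int main(void)
{
	demo_sync_for_device(0x80000000UL, 4096);
	demo_sync_for_cpu(0x80000000UL, 4096, DMA_FROM_DEVICE);
	demo_sync_for_cpu(0x80000000UL, 4096, DMA_TO_DEVICE);	/* no-op */
	return 0;
}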

arch/arm64/mm/init.c
@@ -389,7 +389,7 @@ void __init arm64_memblock_init(void)
 
 	early_init_fdt_scan_reserved_mem();
 
-	if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32))
+	if (!defer_reserve_crashkernel())
 		reserve_crashkernel();
 
 	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;

@@ -438,7 +438,7 @@ void __init bootmem_init(void)
 	 * request_standard_resources() depends on crashkernel's memory being
 	 * reserved, so do it here.
 	 */
-	if (IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32))
+	if (defer_reserve_crashkernel())
 		reserve_crashkernel();
 
 	memblock_dump_all();

arch/arm64/mm/mmu.c
@@ -536,8 +536,7 @@ static void __init map_mem(pgd_t *pgdp)
 
 #ifdef CONFIG_KEXEC_CORE
 	if (crash_mem_map) {
-		if (IS_ENABLED(CONFIG_ZONE_DMA) ||
-		    IS_ENABLED(CONFIG_ZONE_DMA32))
+		if (defer_reserve_crashkernel())
 			flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 		else if (crashk_res.end)
 			memblock_mark_nomap(crashk_res.start,

@@ -578,8 +577,7 @@ static void __init map_mem(pgd_t *pgdp)
 	 * through /sys/kernel/kexec_crash_size interface.
 	 */
 #ifdef CONFIG_KEXEC_CORE
-	if (crash_mem_map &&
-	    !IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32)) {
+	if (crash_mem_map && !defer_reserve_crashkernel()) {
 		if (crashk_res.end) {
 			__map_memblock(pgdp, crashk_res.start,
 				       crashk_res.end + 1,