Files
linux/arch/csky/abiv2/cacheflush.c
Thomas Weißschuh 9aa12167ef csky: abiv2: adapt to new folio flags field
Recent changes require the raw folio flags to be accessed via ".f". The
merge commit that introduced this change adapted most of the architecture
code but missed the csky abiv2 variant.
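
For context, a minimal sketch of the access change (the memdesc_flags_t
layout shown here is paraphrased from the commit named in the Fixes: tag,
not quoted verbatim):

/* Before: folio->flags was a bare unsigned long, so raw bit ops took
 * its address directly. */
test_and_set_bit(PG_dcache_clean, &folio->flags);

/* After: the flags word is wrapped in a small struct, roughly */
typedef struct {
	unsigned long f;
} memdesc_flags_t;

/* so raw accesses now go through the ".f" member, as in the fix below. */
test_and_set_bit(PG_dcache_clean, &folio->flags.f);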

[rppt@kernel.org: add fix for arch/csky/abiv2/cacheflush.c]
Link: https://lkml.kernel.org/r/aPCE238oxAB9QcZa@kernel.org
Fixes: 53fbef56e0 ("mm: introduce memdesc_flags_t")
Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Guo Ren <guoren@kernel.org>
Acked-by: Zi Yan <ziy@nvidia.com>
Cc: Guo Ren <guoren@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-10-21 15:46:18 -07:00

93 lines
2.2 KiB
C

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>
#include <asm/tlbflush.h>

void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long address, pte_t *pte, unsigned int nr)
{
	unsigned long pfn = pte_pfn(*pte);
	struct folio *folio;
	unsigned int i;

	flush_tlb_page(vma, address);

	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));

	/* Nothing to do if the folio's D-cache is already marked clean. */
	if (test_and_set_bit(PG_dcache_clean, &folio->flags.f))
		return;

	icache_inv_range(address, address + nr * PAGE_SIZE);

	/*
	 * Write back the D-cache for every page of the folio, and invalidate
	 * the I-cache as well when the mapping is executable.
	 */
	for (i = 0; i < folio_nr_pages(folio); i++) {
		unsigned long addr = (unsigned long) kmap_local_folio(folio,
								i * PAGE_SIZE);

		dcache_wb_range(addr, addr + PAGE_SIZE);
		if (vma->vm_flags & VM_EXEC)
			icache_inv_range(addr, addr + PAGE_SIZE);
		kunmap_local((void *) addr);
	}
}

void flush_icache_deferred(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_icache_inv_all(NULL);
	}
}

void flush_icache_mm_range(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

#ifdef CONFIG_CPU_HAS_ICACHE_INS
	if (mm == current->mm) {
		icache_inv_range(start, end);
		preempt_enable();
		return;
	}
#endif

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_icache_inv_all(NULL);

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));

	if (mm != current->active_mm || !cpumask_empty(&others)) {
		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
		cpumask_clear(mask);
	}

	preempt_enable();
}