x86/mm: Change cpa_flush() to call flush_tlb_kernel_range() directly

The function cpa_flush() calls flush_tlb_one_kernel() and
flush_tlb_all().

Replacing that with a call to flush_tlb_kernel_range() allows
cpa_flush() to make use of INVLPGB or RAR without any additional
changes.

Initialize invlpgb_count_max to 1, since flush_tlb_kernel_range()
can now be called before invlpgb_count_max has been initialized
to the value read from CPUID.

[riel: remove now unused __cpa_flush_tlb]

Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
Signed-off-by: Rik van Riel <riel@surriel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/all/20250606171112.4013261-4-riel%40surriel.com
This commit is contained in:
Yu-cheng Yu
2025-06-06 13:10:35 -04:00
committed by Dave Hansen
parent c17b750b3a
commit 86e6815b31

View File

@@ -399,15 +399,6 @@ static void cpa_flush_all(unsigned long cache)
on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}
/*
 * Per-CPU callback: flush one kernel TLB entry for every page tracked
 * by the struct cpa_data passed in @data.
 */
static void __cpa_flush_tlb(void *data)
{
	struct cpa_data *cpa = data;
	unsigned int idx;

	for (idx = 0; idx < cpa->numpages; idx++) {
		unsigned long addr = fix_addr(__cpa_addr(cpa, idx));

		flush_tlb_one_kernel(addr);
	}
}
static int collapse_large_pages(unsigned long addr, struct list_head *pgtables);
static void cpa_collapse_large_pages(struct cpa_data *cpa)
@@ -444,6 +435,7 @@ static void cpa_collapse_large_pages(struct cpa_data *cpa)
static void cpa_flush(struct cpa_data *cpa, int cache)
{
unsigned long start, end;
unsigned int i;
BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
@@ -453,10 +445,12 @@ static void cpa_flush(struct cpa_data *cpa, int cache)
goto collapse_large_pages;
}
if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling)
flush_tlb_all();
else
on_each_cpu(__cpa_flush_tlb, cpa, 1);
start = fix_addr(__cpa_addr(cpa, 0));
end = fix_addr(__cpa_addr(cpa, cpa->numpages));
if (cpa->force_flush_all)
end = TLB_FLUSH_ALL;
flush_tlb_kernel_range(start, end);
if (!cache)
goto collapse_large_pages;