x86/mtrr: Move cache control code to cacheinfo.c
Prepare making PAT and MTRR support independent from each other by
moving some code needed by both out of the MTRR-specific sources.

  [ bp: Massage commit message. ]

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20221102074713.21493-7-jgross@suse.com
commit 23a63e3690
parent 4ad7149e46
committed by Borislav Petkov
arch/x86/kernel/cpu/cacheinfo.c
@@ -20,6 +20,8 @@
 #include <asm/cacheinfo.h>
 #include <asm/amd_nb.h>
 #include <asm/smp.h>
+#include <asm/mtrr.h>
+#include <asm/tlbflush.h>
 
 #include "cpu.h"
 
@@ -1043,3 +1045,78 @@ int populate_cache_leaves(unsigned int cpu)
 
 	return 0;
 }
+
+/*
+ * Disable and enable caches. Needed for changing MTRRs and the PAT MSR.
+ *
+ * Since we are disabling the cache don't allow any interrupts,
+ * they would run extremely slow and would only increase the pain.
+ *
+ * The caller must ensure that local interrupts are disabled and
+ * are reenabled after cache_enable() has been called.
+ */
+static unsigned long saved_cr4;
+static DEFINE_RAW_SPINLOCK(cache_disable_lock);
+
+void cache_disable(void) __acquires(cache_disable_lock)
+{
+	unsigned long cr0;
+
+	/*
+	 * Note that this is not ideal
+	 * since the cache is only flushed/disabled for this CPU while the
+	 * MTRRs are changed, but changing this requires more invasive
+	 * changes to the way the kernel boots
+	 */
+
+	raw_spin_lock(&cache_disable_lock);
+
+	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
+	cr0 = read_cr0() | X86_CR0_CD;
+	write_cr0(cr0);
+
+	/*
+	 * Cache flushing is the most time-consuming step when programming
+	 * the MTRRs. Fortunately, as per the Intel Software Development
+	 * Manual, we can skip it if the processor supports cache self-
+	 * snooping.
+	 */
+	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
+		wbinvd();
+
+	/* Save value of CR4 and clear Page Global Enable (bit 7) */
+	if (cpu_feature_enabled(X86_FEATURE_PGE)) {
+		saved_cr4 = __read_cr4();
+		__write_cr4(saved_cr4 & ~X86_CR4_PGE);
+	}
+
+	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+	flush_tlb_local();
+
+	if (cpu_feature_enabled(X86_FEATURE_MTRR))
+		mtrr_disable();
+
+	/* Again, only flush caches if we have to. */
+	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
+		wbinvd();
+}
+
+void cache_enable(void) __releases(cache_disable_lock)
+{
+	/* Flush TLBs (no need to flush caches - they are disabled) */
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+	flush_tlb_local();
+
+	if (cpu_feature_enabled(X86_FEATURE_MTRR))
+		mtrr_enable();
+
+	/* Enable caches */
+	write_cr0(read_cr0() & ~X86_CR0_CD);
+
+	/* Restore value of CR4 */
+	if (cpu_feature_enabled(X86_FEATURE_PGE))
+		__write_cr4(saved_cr4);
+
+	raw_spin_unlock(&cache_disable_lock);
+}
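A note on usage (not part of this commit's diff): the comment block above requires the caller to disable local interrupts before cache_disable() and to re-enable them only after cache_enable() has returned. A minimal sketch of that calling pattern, with do_msr_programming() as a hypothetical placeholder for the actual MTRR/PAT MSR writes:

/* Illustration only -- caller-side contract around cache_disable()/cache_enable(). */
static void set_memory_types_on_this_cpu(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* caller keeps interrupts off...         */
	cache_disable();		/* CD=1, caches flushed, MTRRs disabled   */

	do_msr_programming();		/* placeholder: MTRR or PAT MSR updates   */

	cache_enable();			/* MTRRs back on, caches on, CR4 restored */
	local_irq_restore(flags);	/* ...and re-enables them only afterwards */
}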
arch/x86/kernel/cpu/mtrr/generic.c
@@ -731,80 +731,6 @@ void mtrr_enable(void)
 	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
 }
 
-/*
- * Disable and enable caches. Needed for changing MTRRs and the PAT MSR.
- *
- * Since we are disabling the cache don't allow any interrupts,
- * they would run extremely slow and would only increase the pain.
- *
- * The caller must ensure that local interrupts are disabled and
- * are reenabled after cache_enable() has been called.
- */
-static unsigned long saved_cr4;
-static DEFINE_RAW_SPINLOCK(cache_disable_lock);
-
-void cache_disable(void) __acquires(cache_disable_lock)
-{
-	unsigned long cr0;
-
-	/*
-	 * Note that this is not ideal
-	 * since the cache is only flushed/disabled for this CPU while the
-	 * MTRRs are changed, but changing this requires more invasive
-	 * changes to the way the kernel boots
-	 */
-
-	raw_spin_lock(&cache_disable_lock);
-
-	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
-	cr0 = read_cr0() | X86_CR0_CD;
-	write_cr0(cr0);
-
-	/*
-	 * Cache flushing is the most time-consuming step when programming
-	 * the MTRRs. Fortunately, as per the Intel Software Development
-	 * Manual, we can skip it if the processor supports cache self-
-	 * snooping.
-	 */
-	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
-		wbinvd();
-
-	/* Save value of CR4 and clear Page Global Enable (bit 7) */
-	if (boot_cpu_has(X86_FEATURE_PGE)) {
-		saved_cr4 = __read_cr4();
-		__write_cr4(saved_cr4 & ~X86_CR4_PGE);
-	}
-
-	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	flush_tlb_local();
-
-	if (cpu_feature_enabled(X86_FEATURE_MTRR))
-		mtrr_disable();
-
-	/* Again, only flush caches if we have to. */
-	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
-		wbinvd();
-}
-
-void cache_enable(void) __releases(cache_disable_lock)
-{
-	/* Flush TLBs (no need to flush caches - they are disabled) */
-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	flush_tlb_local();
-
-	if (cpu_feature_enabled(X86_FEATURE_MTRR))
-		mtrr_enable();
-
-	/* Enable caches */
-	write_cr0(read_cr0() & ~X86_CR0_CD);
-
-	/* Restore value of CR4 */
-	if (boot_cpu_has(X86_FEATURE_PGE))
-		__write_cr4(saved_cr4);
-	raw_spin_unlock(&cache_disable_lock);
-}
-
 static void generic_set_all(void)
 {
 	unsigned long mask, count;
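Not visible in the two hunks above: for mtrr/generic.c to keep calling the moved functions, cache_disable() and cache_enable() need prototypes shared by both files, presumably in <asm/cacheinfo.h> (which the first hunk already includes). A sketch of the expected declarations:

/* arch/x86/include/asm/cacheinfo.h -- expected declarations (sketch) */
void cache_disable(void);
void cache_enable(void);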