mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-04-04 22:54:43 -04:00)
x86/msr: Convert the rdpmc() macro to an __always_inline function
Functions offer type safety and better readability compared to macros.
Additionally, always inline functions can match the performance of macros.
Converting the rdpmc() macro into an always inline function is simple and
straightforward, so just make the change.

Moreover, the read result is now the returned value, further enhancing
readability.

Signed-off-by: Xin Li (Intel) <xin@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Uros Bizjak <ubizjak@gmail.com>
Link: https://lore.kernel.org/r/20250427092027.1598740-6-xin@zytor.com
committed by Ingo Molnar
parent 7d9ccde56b
commit 795ada5287
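
In short, the change replaces an assign-through-macro with a value-returning
inline function; both forms below are taken verbatim from the hunks that
follow:

/* Before: the macro writes through its second argument, so the "output"
 * reads like an input and gets no type checking. */
#define rdpmc(counter, val) ((val) = native_read_pmc(counter))

/* After: an __always_inline function type-checks 'counter', returns the
 * result, and still collapses to a bare RDPMC just like the macro did. */
static __always_inline u64 rdpmc(int counter)
{
	return native_read_pmc(counter);
}

Call sites change accordingly: rdpmc(idx, val) becomes val = rdpmc(idx).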
arch/x86/events/amd/uncore.c
@@ -108,7 +108,7 @@ static void amd_uncore_read(struct perf_event *event)
 	if (hwc->event_base_rdpmc < 0)
 		rdmsrq(hwc->event_base, new);
 	else
-		rdpmc(hwc->event_base_rdpmc, new);
+		new = rdpmc(hwc->event_base_rdpmc);
 
 	local64_set(&hwc->prev_count, new);
 	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
arch/x86/events/core.c
@@ -135,7 +135,7 @@ u64 x86_perf_event_update(struct perf_event *event)
 	 */
 	prev_raw_count = local64_read(&hwc->prev_count);
 	do {
-		rdpmc(hwc->event_base_rdpmc, new_raw_count);
+		new_raw_count = rdpmc(hwc->event_base_rdpmc);
 	} while (!local64_try_cmpxchg(&hwc->prev_count,
 				      &prev_raw_count, new_raw_count));
 
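The loop above is a lock-free read-and-publish: the counter is re-read until
prev_count can be atomically advanced from the snapshot the read was based
on, so a concurrent updater never loses a delta. A minimal user-space sketch
of the same pattern, with hypothetical names (read_counter() stands in for
rdpmc()):

#include <stdatomic.h>
#include <stdint.h>

/* Hedged analogue of x86_perf_event_update()'s retry loop; on failure,
 * atomic_compare_exchange_weak() reloads *prev into 'old' — the same
 * semantics as local64_try_cmpxchg() — so the next iteration retries
 * against the freshly observed previous value. */
static uint64_t update_counter(_Atomic uint64_t *prev,
			       uint64_t (*read_counter)(void))
{
	uint64_t old = atomic_load(prev);
	uint64_t new;

	do {
		new = read_counter();
	} while (!atomic_compare_exchange_weak(prev, &old, new));

	return new - old;	/* delta since the last successful update */
}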
arch/x86/events/intel/core.c
@@ -2725,12 +2725,12 @@ static u64 intel_update_topdown_event(struct perf_event *event, int metric_end,
 
 	if (!val) {
 		/* read Fixed counter 3 */
-		rdpmc((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
+		slots = rdpmc(3 | INTEL_PMC_FIXED_RDPMC_BASE);
 		if (!slots)
 			return 0;
 
 		/* read PERF_METRICS */
-		rdpmc(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
+		metrics = rdpmc(INTEL_PMC_FIXED_RDPMC_METRICS);
 	} else {
 		slots = val[0];
 		metrics = val[1];
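A note on the 3 | INTEL_PMC_FIXED_RDPMC_BASE index above: RDPMC selects
fixed-function counters via a flag bit in the counter index rather than a
separate instruction. A sketch of the encoding, assuming the usual
definitions (values mirror arch/x86/include/asm/perf_event.h and the SDM;
treat them as illustrative):

#define INTEL_PMC_FIXED_RDPMC_BASE	(1 << 30)	/* fixed-counter space */
#define INTEL_PMC_FIXED_RDPMC_METRICS	(1 << 29)	/* PERF_METRICS view */

static u64 read_topdown_counters(u64 *metrics)
{
	/* fixed counter 3 counts TOPDOWN.SLOTS, the metrics denominator */
	u64 slots = rdpmc(3 | INTEL_PMC_FIXED_RDPMC_BASE);

	*metrics = rdpmc(INTEL_PMC_FIXED_RDPMC_METRICS);
	return slots;
}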
arch/x86/events/intel/ds.c
@@ -2277,7 +2277,7 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
 	WARN_ON(this_cpu_read(cpu_hw_events.enabled));
 
 	prev_raw_count = local64_read(&hwc->prev_count);
-	rdpmc(hwc->event_base_rdpmc, new_raw_count);
+	new_raw_count = rdpmc(hwc->event_base_rdpmc);
 	local64_set(&hwc->prev_count, new_raw_count);
 
 	/*
arch/x86/include/asm/msr.h
@@ -217,7 +217,10 @@ static inline int rdmsrq_safe(u32 msr, u64 *p)
 	return err;
 }
 
-#define rdpmc(counter, val) ((val) = native_read_pmc(counter))
+static __always_inline u64 rdpmc(int counter)
+{
+	return native_read_pmc(counter);
+}
 
 #endif	/* !CONFIG_PARAVIRT_XXL */
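native_read_pmc() is a thin wrapper around the RDPMC instruction, which takes
the counter index in ECX and returns the 64-bit value in EDX:EAX. For
illustration, a user-space analogue (a hypothetical helper; it only works if
CR4.PCE permits RDPMC outside ring 0, otherwise it faults):

#include <stdint.h>

static inline uint64_t rdpmc_user(uint32_t counter)
{
	uint32_t lo, hi;

	/* ECX selects the counter; the result comes back in EDX:EAX */
	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return ((uint64_t)hi << 32) | lo;
}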
arch/x86/include/asm/paravirt.h
@@ -239,13 +239,11 @@ static inline int rdmsrq_safe(unsigned msr, u64 *p)
 	return err;
 }
 
-static inline u64 paravirt_read_pmc(int counter)
+static __always_inline u64 rdpmc(int counter)
 {
 	return PVOP_CALL1(u64, cpu.read_pmc, counter);
 }
 
-#define rdpmc(counter, val) ((val) = paravirt_read_pmc(counter))
-
 static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 {
 	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
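After this change the native and paravirt headers define rdpmc() with an
identical signature, and which body a call site gets is decided at compile
time by CONFIG_PARAVIRT_XXL (the #endif in the msr.h hunk above marks the
native side). A simplified sketch collapsing both headers into one view
(assumption: surrounding configuration details elided):

#ifdef CONFIG_PARAVIRT_XXL
static __always_inline u64 rdpmc(int counter)
{
	/* indirect call patched to the hypervisor's pmc-read hook */
	return PVOP_CALL1(u64, cpu.read_pmc, counter);
}
#else
static __always_inline u64 rdpmc(int counter)
{
	return native_read_pmc(counter);	/* raw RDPMC */
}
#endif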
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -1019,8 +1019,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
 	 * used in L1 cache, second to capture accurate value that does not
 	 * include cache misses incurred because of instruction loads.
 	 */
-	rdpmc(hit_pmcnum, hits_before);
-	rdpmc(miss_pmcnum, miss_before);
+	hits_before = rdpmc(hit_pmcnum);
+	miss_before = rdpmc(miss_pmcnum);
 	/*
 	 * From SDM: Performing back-to-back fast reads are not guaranteed
 	 * to be monotonic.
@@ -1028,8 +1028,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
 	 * before proceeding.
 	 */
 	rmb();
-	rdpmc(hit_pmcnum, hits_before);
-	rdpmc(miss_pmcnum, miss_before);
+	hits_before = rdpmc(hit_pmcnum);
+	miss_before = rdpmc(miss_pmcnum);
 	/*
 	 * Use LFENCE to ensure all previous instructions are retired
 	 * before proceeding.
@@ -1051,8 +1051,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
 	 * before proceeding.
 	 */
 	rmb();
-	rdpmc(hit_pmcnum, hits_after);
-	rdpmc(miss_pmcnum, miss_after);
+	hits_after = rdpmc(hit_pmcnum);
+	miss_after = rdpmc(miss_pmcnum);
 	/*
 	 * Use LFENCE to ensure all previous instructions are retired
 	 * before proceeding.
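Zooming out, the three pseudo_lock.c hunks follow one measurement
discipline: because back-to-back RDPMC fast reads are not guaranteed to be
monotonic (per the SDM comment above), each sample pair is bracketed by
fences, the first "before" pair only warms the code path, and residency is
the delta between the _after and _before samples. A condensed sketch of the
sequence (a hypothetical simplification; the real function does more setup):

	u64 hits_before, hits_after, miss_before, miss_after;

	hits_before = rdpmc(hit_pmcnum);	/* warm-up reads, values */
	miss_before = rdpmc(miss_pmcnum);	/* overwritten below      */
	rmb();					/* retire prior instructions */
	hits_before = rdpmc(hit_pmcnum);	/* the samples actually used */
	miss_before = rdpmc(miss_pmcnum);
	rmb();

	/* ... walk the pseudo-locked memory region under test ... */

	rmb();
	hits_after = rdpmc(hit_pmcnum);
	miss_after = rdpmc(miss_pmcnum);
	rmb();

	/* residency is reported from the two deltas */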