Merge tag 'perf-urgent-2025-05-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc perf fixes from Ingo Molnar:

 - Require the group leader of branch counter groups and PEBS
   counter-snapshotting groups to be an x86 event before its
   x86-specific flags are checked (a userspace sketch of such a
   mixed-PMU group follows the diffstat below).

 - Fix counter snapshotting for non-precise events, where counter
   values can temporarily move backwards and confuse the update
   code.

 - Restrict perf/KVM PEBS to guest-owned events.

* tag 'perf-urgent-2025-05-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel: KVM: Mask PEBS_ENABLE loaded for guest with vCPU's value.
  perf/x86/intel/ds: Fix counter backwards of non-precise events counters-snapshotting
  perf/x86/intel: Check the X86 leader for pebs_counter_event_group
  perf/x86/intel: Only check the group flag for X86 leader
Author: Linus Torvalds
Date:   2025-05-04 08:06:42 -07:00

4 changed files with 30 additions and 6 deletions
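
For context on the first fix above: a perf event group's leader does not have to belong to the x86 PMU; a software event, for example, can lead a group that contains hardware events. Below is a minimal userspace sketch, not part of the patches, that creates such a group with perf_event_open(2); error handling is omitted and the file name is made up.

/* group_sketch.c - illustration only: build a group whose leader is a
 * software event, so the kernel-side leader->hw.flags describe the
 * software PMU's state, not the x86 PMU's. */
#include <linux/perf_event.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                           int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr leader = { 0 }, sibling = { 0 };
        int lfd, sfd;

        /* Software event as the group leader. */
        leader.type = PERF_TYPE_SOFTWARE;
        leader.size = sizeof(leader);
        leader.config = PERF_COUNT_SW_DUMMY;
        leader.disabled = 1;

        /* Hardware (x86 PMU) event grouped under the software leader. */
        sibling.type = PERF_TYPE_HARDWARE;
        sibling.size = sizeof(sibling);
        sibling.config = PERF_COUNT_HW_CPU_CYCLES;

        lfd = perf_event_open(&leader, 0, -1, -1, 0);   /* leader: group_fd = -1 */
        sfd = perf_event_open(&sibling, 0, -1, lfd, 0); /* member of lfd's group */

        printf("leader fd=%d, sibling fd=%d\n", lfd, sfd);
        return 0;
}

With a group like this, helpers such as is_branch_counters_group() used to read the leader's hw.flags even though those bits are only meaningful for events owned by the x86 PMU; the last hunk below gates that check on is_x86_event().
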

--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c

@@ -754,7 +754,7 @@ void x86_pmu_enable_all(int added)
         }
 }
 
-static inline int is_x86_event(struct perf_event *event)
+int is_x86_event(struct perf_event *event)
 {
         int i;
 
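The only change here is dropping the static inline qualifier: is_x86_event() now has external linkage because the inline helpers added to perf_event.h (last hunk below) call it from every file that includes that header. A tiny standalone sketch of that split, with made-up names rather than the kernel's:

/* linkage_sketch.c - illustration only: one external declaration that
 * header-inline helpers can rely on, and a single out-of-line definition. */
#include <stdio.h>

struct demo_event {
        int pmu_id;                     /* which PMU owns this event */
};

#define DEMO_X86_PMU 1

/* Would live in the shared header ... */
int is_demo_x86_event(const struct demo_event *event);

/* ... so header-local inline helpers can call it from any translation unit. */
static inline int demo_leader_is_x86(const struct demo_event *leader)
{
        return is_demo_x86_event(leader);
}

/* Would live in exactly one .c file. */
int is_demo_x86_event(const struct demo_event *event)
{
        return event->pmu_id == DEMO_X86_PMU;
}

int main(void)
{
        struct demo_event leader = { .pmu_id = DEMO_X86_PMU };

        printf("leader is x86: %d\n", demo_leader_is_x86(&leader));
        return 0;
}
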
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c

@@ -4395,7 +4395,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
         arr[pebs_enable] = (struct perf_guest_switch_msr){
                 .msr = MSR_IA32_PEBS_ENABLE,
                 .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
-                .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
+                .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable,
         };
 
         if (arr[pebs_enable].host) {

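The .guest value is now additionally masked with kvm_pmu->pebs_enable, the PEBS_ENABLE image the vCPU itself programmed, so only counters that are PEBS-capable, not host-owned, and actually enabled by the guest are loaded for the guest. A standalone sketch of that three-way intersection, using made-up bit patterns rather than real register contents:

/* pebs_guest_mask_sketch.c - illustration only. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t pebs_mask      = 0x0f;  /* counters with PEBS enabled (hypothetical) */
        uint64_t host_ctrl_mask = 0x01;  /* counters owned by the host (hypothetical) */
        uint64_t guest_pebs_en  = 0x06;  /* what the vCPU programmed (hypothetical) */

        /* Mirrors ".guest = pebs_mask & ~cpuc->intel_ctrl_host_mask
         *                 & kvm_pmu->pebs_enable" from the hunk above. */
        uint64_t guest = pebs_mask & ~host_ctrl_mask & guest_pebs_en;

        printf("guest PEBS_ENABLE = %#llx\n", (unsigned long long)guest); /* 0x6 */
        return 0;
}

Dropping the last term would hand the guest a PEBS_ENABLE value covering counters its virtual PMU never enabled, which is what the "restrict perf/KVM PEBS to guest-owned events" summary item refers to.
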
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c

@@ -2379,8 +2379,25 @@ __intel_pmu_pebs_last_event(struct perf_event *event,
                          */
                         intel_pmu_save_and_restart_reload(event, count);
                 }
-        } else
-                intel_pmu_save_and_restart(event);
+        } else {
+                /*
+                 * For a non-precise event, it's possible that the
+                 * counters-snapshotting record holds a positive value for the
+                 * overflowed event. The HW auto-reload mechanism then
+                 * resets the counter to 0 immediately, because
+                 * pebs_event_reset is cleared when PERF_X86_EVENT_AUTO_RELOAD
+                 * is not set. The counter may therefore be observed going
+                 * backwards in the PMI handler.
+                 *
+                 * Since the event value has already been updated when the
+                 * counters-snapshotting record was processed, only the new
+                 * period needs to be set for the counter.
+                 */
+                if (is_pebs_counter_event_group(event))
+                        static_call(x86_pmu_set_period)(event);
+                else
+                        intel_pmu_save_and_restart(event);
+        }
 }
 
 static __always_inline void

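The comment added in this hunk describes the window being closed: the counters-snapshotting record has already folded the snapshot value into the event, and the hardware then auto-reloads the counter to 0, so a later read in the PMI handler can return a smaller raw value than the one saved earlier. The sketch below, a standalone illustration with a made-up counter width rather than the kernel's update routine, shows why feeding such a pair into the usual new-minus-prev accounting would be wrong:

/* backwards_delta_sketch.c - illustration only. */
#include <stdint.h>
#include <stdio.h>

#define CNTVAL_BITS 48  /* hypothetical counter width */

/* Shift trick commonly used to sign-extend a delta computed over a
 * counter narrower than 64 bits. */
static int64_t counter_delta(uint64_t prev, uint64_t now)
{
        int shift = 64 - CNTVAL_BITS;

        return ((int64_t)(now << shift) - (int64_t)(prev << shift)) >> shift;
}

int main(void)
{
        uint64_t prev = 0x1000;  /* value already accounted from the snapshot */
        uint64_t now  = 0x0;     /* counter has auto-reloaded to 0 */

        /* A naive re-read in the PMI handler would subtract events. */
        printf("delta = %lld\n", (long long)counter_delta(prev, now));  /* -4096 */
        return 0;
}

That is why, for PEBS counter-snapshotting groups, the patch only re-arms the period via static_call(x86_pmu_set_period)() instead of calling intel_pmu_save_and_restart(), which would re-read the counter and fold a negative delta into the event count.
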
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h

@@ -110,14 +110,21 @@ static inline bool is_topdown_event(struct perf_event *event)
         return is_metric_event(event) || is_slots_event(event);
 }
 
+int is_x86_event(struct perf_event *event);
+
+static inline bool check_leader_group(struct perf_event *leader, int flags)
+{
+        return is_x86_event(leader) ? !!(leader->hw.flags & flags) : false;
+}
+
 static inline bool is_branch_counters_group(struct perf_event *event)
 {
-        return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS;
+        return check_leader_group(event->group_leader, PERF_X86_EVENT_BRANCH_COUNTERS);
 }
 
 static inline bool is_pebs_counter_event_group(struct perf_event *event)
 {
-        return event->group_leader->hw.flags & PERF_X86_EVENT_PEBS_CNTR;
+        return check_leader_group(event->group_leader, PERF_X86_EVENT_PEBS_CNTR);
 }
 
 struct amd_nb {
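
The new check_leader_group() helper is why is_x86_event() gained external linkage in the first hunk: hw.flags bits set by another PMU carry that PMU's meaning, so reading them on a non-x86 group leader could make an unrelated group look like a branch-counters or PEBS counter-snapshotting group. A minimal standalone sketch of the guarded check, with made-up types and flag values rather than the kernel's:

/* leader_guard_sketch.c - illustration only. */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_X86_EVENT_BRANCH_COUNTERS 0x4      /* made-up flag bit */

struct fake_event {
        bool is_x86;            /* stand-in for is_x86_event() */
        unsigned int hw_flags;  /* meaning depends on the owning PMU */
};

/* Same shape as check_leader_group(): only trust hw_flags when the leader
 * actually belongs to this PMU. */
static bool check_leader_group(const struct fake_event *leader, unsigned int flags)
{
        return leader->is_x86 ? !!(leader->hw_flags & flags) : false;
}

int main(void)
{
        /* A non-x86 leader whose (differently defined) flag word happens to
         * have the same bit set. */
        struct fake_event sw_leader  = { .is_x86 = false, .hw_flags = 0x4 };
        struct fake_event x86_leader = { .is_x86 = true,
                                         .hw_flags = FAKE_X86_EVENT_BRANCH_COUNTERS };

        printf("unguarded check, sw leader:  %d\n",
               !!(sw_leader.hw_flags & FAKE_X86_EVENT_BRANCH_COUNTERS));         /* 1: wrong */
        printf("guarded check, sw leader:    %d\n",
               check_leader_group(&sw_leader, FAKE_X86_EVENT_BRANCH_COUNTERS));  /* 0 */
        printf("guarded check, x86 leader:   %d\n",
               check_leader_group(&x86_leader, FAKE_X86_EVENT_BRANCH_COUNTERS)); /* 1 */
        return 0;
}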