Mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Synced 2026-05-04 10:56:06 -04:00
perf/x86/intel: Remove driver-specific throttle support

The throttle support has been added in the generic code. Remove the
driver-specific throttle support.

Besides the throttle, perf_event_overflow may return true because of
event_limit. It already does an inatomic event disable. The pmu->stop
is not required either.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250520181644.2673067-4-kan.liang@linux.intel.com
This commit is contained in:
Committed by Peter Zijlstra
parent
e800ac5120
commit
b8328f6720
@@ -1728,8 +1728,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
|
||||
|
||||
perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL);
|
||||
|
||||
if (perf_event_overflow(event, &data, regs))
|
||||
x86_pmu_stop(event, 0);
|
||||
perf_event_overflow(event, &data, regs);
|
||||
}
|
||||
|
||||
if (handled)
|
||||
|
||||
@@ -3138,8 +3138,7 @@ static void x86_pmu_handle_guest_pebs(struct pt_regs *regs,
|
||||
continue;
|
||||
|
||||
perf_sample_data_init(data, 0, event->hw.last_period);
|
||||
if (perf_event_overflow(event, data, regs))
|
||||
x86_pmu_stop(event, 0);
|
||||
perf_event_overflow(event, data, regs);
|
||||
|
||||
/* Inject one fake event is enough. */
|
||||
break;
|
||||
@@ -3282,8 +3281,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
|
||||
if (has_branch_stack(event))
|
||||
intel_pmu_lbr_save_brstack(&data, cpuc, event);
|
||||
|
||||
if (perf_event_overflow(event, &data, regs))
|
||||
x86_pmu_stop(event, 0);
|
||||
perf_event_overflow(event, &data, regs);
|
||||
}
|
||||
|
||||
return handled;
|
||||
|
||||
@@ -2359,8 +2359,7 @@ __intel_pmu_pebs_last_event(struct perf_event *event,
|
||||
* All but the last records are processed.
|
||||
* The last one is left to be able to call the overflow handler.
|
||||
*/
|
||||
if (perf_event_overflow(event, data, regs))
|
||||
x86_pmu_stop(event, 0);
|
||||
perf_event_overflow(event, data, regs);
|
||||
}
|
||||
|
||||
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
|
||||
@@ -2588,8 +2587,8 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
|
||||
if (error[bit]) {
|
||||
perf_log_lost_samples(event, error[bit]);
|
||||
|
||||
if (iregs && perf_event_account_interrupt(event))
|
||||
x86_pmu_stop(event, 0);
|
||||
if (iregs)
|
||||
perf_event_account_interrupt(event);
|
||||
}
|
||||
|
||||
if (counts[bit]) {
|
||||
|
||||
@@ -254,8 +254,7 @@ static int knc_pmu_handle_irq(struct pt_regs *regs)
|
||||
|
||||
perf_sample_data_init(&data, 0, last_period);
|
||||
|
||||
if (perf_event_overflow(event, &data, regs))
|
||||
x86_pmu_stop(event, 0);
|
||||
perf_event_overflow(event, &data, regs);
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -1072,8 +1072,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
|
||||
continue;
|
||||
|
||||
|
||||
if (perf_event_overflow(event, &data, regs))
|
||||
x86_pmu_stop(event, 0);
|
||||
perf_event_overflow(event, &data, regs);
|
||||
}
|
||||
|
||||
if (handled)
|
||||
|
||||
Reference in New Issue
Block a user