Merge tag 'perf-core-2025-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf events updates from Ingo Molnar:
"Core & generic-arch updates:
- Add support for dynamic constraints and propagate it to the Intel
driver (Kan Liang)
- Fix & enhance driver-specific throttling support (Kan Liang)
- Record sample last_period before updating on the x86 and PowerPC
platforms (Mark Barnett)
- Make perf_pmu_unregister() usable (Peter Zijlstra)
- Unify perf_event_free_task() / perf_event_exit_task_context()
(Peter Zijlstra)
- Simplify perf_event_release_kernel() and perf_event_free_task()
(Peter Zijlstra)
- Allocate non-contiguous AUX pages by default (Yabin Cui)
Uprobes updates:
- Add support to emulate NOP instructions (Jiri Olsa)
- selftests/bpf: Add 5-byte NOP uprobe trigger benchmark (Jiri Olsa)
x86 Intel PMU enhancements:
- Support Intel Auto Counter Reload [ACR] (Kan Liang)
- Add PMU support for Clearwater Forest (Dapeng Mi)
- Arch-PEBS preparatory changes: (Dapeng Mi)
- Parse CPUID archPerfmonExt leaves for non-hybrid CPUs
- Decouple BTS initialization from PEBS initialization
- Introduce pairs of PEBS static calls
x86 AMD PMU enhancements:
- Use hrtimer for handling overflows in the AMD uncore driver
(Sandipan Das)
- Prevent UMC counters from saturating (Sandipan Das)
Fixes and cleanups:
- Fix put_ctx() ordering (Frederic Weisbecker)
- Fix irq work dereferencing garbage (Frederic Weisbecker)
- Misc fixes and cleanups (Changbin Du, Frederic Weisbecker, Ian
Rogers, Ingo Molnar, Kan Liang, Peter Zijlstra, Qing Wang, Sandipan
Das, Thorsten Blum)"
* tag 'perf-core-2025-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (60 commits)
perf/headers: Clean up <linux/perf_event.h> a bit
perf/uapi: Clean up <uapi/linux/perf_event.h> a bit
perf/uapi: Fix PERF_RECORD_SAMPLE comments in <uapi/linux/perf_event.h>
mips/perf: Remove driver-specific throttle support
xtensa/perf: Remove driver-specific throttle support
sparc/perf: Remove driver-specific throttle support
loongarch/perf: Remove driver-specific throttle support
csky/perf: Remove driver-specific throttle support
arc/perf: Remove driver-specific throttle support
alpha/perf: Remove driver-specific throttle support
perf/apple_m1: Remove driver-specific throttle support
perf/arm: Remove driver-specific throttle support
s390/perf: Remove driver-specific throttle support
powerpc/perf: Remove driver-specific throttle support
perf/x86/zhaoxin: Remove driver-specific throttle support
perf/x86/amd: Remove driver-specific throttle support
perf/x86/intel: Remove driver-specific throttle support
perf: Only dump the throttle log for the leader
perf: Fix the throttle logic for a group
perf/core: Add the is_event_in_freq_mode() helper to simplify the code
...
@@ -2069,11 +2069,6 @@ static struct cpuhp_step cpuhp_hp_states[] = {
 		.teardown.single	= NULL,
 		.cant_stop		= true,
 	},
-	[CPUHP_PERF_PREPARE] = {
-		.name			= "perf:prepare",
-		.startup.single		= perf_event_init_cpu,
-		.teardown.single	= perf_event_exit_cpu,
-	},
 	[CPUHP_RANDOM_PREPARE] = {
 		.name			= "random:prepare",
 		.startup.single		= random_prepare_cpu,
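
The removed [CPUHP_PERF_PREPARE] entry wired perf_event_init_cpu()/perf_event_exit_cpu() into the static cpuhp_hp_states[] table. For context, here is a minimal sketch of the dynamic alternative the hotplug core offers, cpuhp_setup_state() with a CPUHP_BP_PREPARE_DYN slot; the example_* names are hypothetical stand-ins, and this excerpt does not show whether the merge actually converts perf to this style.

/*
 * Illustrative sketch only: registering CPU-hotplug prepare/teardown
 * callbacks at runtime instead of through a static cpuhp_hp_states[]
 * entry. The example_* callbacks are stand-ins, not perf code.
 */
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/module.h>

static int example_prepare_cpu(unsigned int cpu)
{
	/* allocate/initialize per-CPU state before the CPU comes up */
	return 0;
}

static int example_dead_cpu(unsigned int cpu)
{
	/* tear down per-CPU state after the CPU has gone down */
	return 0;
}

static int __init example_hotplug_init(void)
{
	int ret;

	/* CPUHP_BP_PREPARE_DYN allocates a dynamic slot in the PREPARE section */
	ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "example:prepare",
				example_prepare_cpu, example_dead_cpu);
	return ret < 0 ? ret : 0;
}
module_init(example_hotplug_init);
MODULE_LICENSE("GPL");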
File diff suppressed because it is too large
@@ -679,7 +679,15 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
 {
 	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
 	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
-	int ret = -ENOMEM, max_order;
+	bool use_contiguous_pages = event->pmu->capabilities & (
+		PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_PREFER_LARGE);
+	/*
+	 * Initialize max_order to 0 for page allocation. This allocates single
+	 * pages to minimize memory fragmentation. This is overridden if the
+	 * PMU needs or prefers contiguous pages (use_contiguous_pages = true).
+	 */
+	int max_order = 0;
+	int ret = -ENOMEM;
 
 	if (!has_aux(event))
 		return -EOPNOTSUPP;
@@ -689,8 +697,8 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
 
 	if (!overwrite) {
 		/*
-		 * Watermark defaults to half the buffer, and so does the
-		 * max_order, to aid PMU drivers in double buffering.
+		 * Watermark defaults to half the buffer, to aid PMU drivers
+		 * in double buffering.
 		 */
 		if (!watermark)
 			watermark = min_t(unsigned long,
@@ -698,16 +706,19 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
 				  (unsigned long)nr_pages << (PAGE_SHIFT - 1));
 
 		/*
-		 * Use aux_watermark as the basis for chunking to
-		 * help PMU drivers honor the watermark.
+		 * If using contiguous pages, use aux_watermark as the basis
+		 * for chunking to help PMU drivers honor the watermark.
 		 */
-		max_order = get_order(watermark);
+		if (use_contiguous_pages)
+			max_order = get_order(watermark);
 	} else {
 		/*
-		 * We need to start with the max_order that fits in nr_pages,
-		 * not the other way around, hence ilog2() and not get_order.
+		 * If using contiguous pages, we need to start with the
+		 * max_order that fits in nr_pages, not the other way around,
+		 * hence ilog2() and not get_order.
 		 */
-		max_order = ilog2(nr_pages);
+		if (use_contiguous_pages)
+			max_order = ilog2(nr_pages);
 		watermark = 0;
 	}
 
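
The rb_alloc_aux() hunk above makes order-0 (single-page, non-contiguous) AUX allocations the default and only raises max_order when the PMU advertises PERF_PMU_CAP_AUX_NO_SG or PERF_PMU_CAP_AUX_PREFER_LARGE. Below is a minimal userspace sketch of that order-selection policy; pick_max_order(), the ilog2_()/get_order_() helpers, and the 4 KiB PAGE_SHIFT_ value are illustrative stand-ins, not kernel API.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT_ 12	/* assume 4 KiB pages for the example */

/* stand-in for the kernel's ilog2(): floor(log2(n)) */
static int ilog2_(unsigned long n)
{
	int order = -1;

	while (n) {
		n >>= 1;
		order++;
	}
	return order;
}

/* stand-in for the kernel's get_order(): smallest order covering size bytes */
static int get_order_(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT_;
	while (size) {
		size >>= 1;
		order++;
	}
	return order;
}

/*
 * Mirror of the new policy: keep order 0 (non-contiguous single pages)
 * unless the PMU needs or prefers contiguous AUX memory.
 */
static int pick_max_order(bool prefer_contiguous, bool overwrite,
			  unsigned long nr_pages, unsigned long watermark)
{
	if (!prefer_contiguous)
		return 0;

	if (!overwrite)
		return get_order_(watermark);	/* chunk by aux_watermark */

	return ilog2_(nr_pages);		/* largest order that fits nr_pages */
}

int main(void)
{
	unsigned long nr_pages = 64;
	unsigned long watermark = nr_pages << (PAGE_SHIFT_ - 1);	/* half the buffer, in bytes */

	printf("SG-capable PMU:         max_order=%d\n",
	       pick_max_order(false, false, nr_pages, watermark));
	printf("contiguous, !overwrite: max_order=%d\n",
	       pick_max_order(true, false, nr_pages, watermark));
	printf("contiguous, overwrite:  max_order=%d\n",
	       pick_max_order(true, true, nr_pages, watermark));
	return 0;
}

For 64 AUX pages with a half-buffer watermark, this example prints max_order 0 for a scatter-gather-capable PMU, 5 when chunking by the watermark, and 6 in overwrite mode, matching the three branches in the hunk above.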