Merge back earlier cpufreq material for 7.1

Author: Rafael J. Wysocki
Date: 2026-04-04 14:58:58 +02:00
10 changed files with 72 additions and 73 deletions

View File

@@ -1944,7 +1944,7 @@ static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
}
/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
u64 cppc_get_dmi_max_khz(void)
{
u16 mhz = 0;
@@ -1958,6 +1958,7 @@ static u64 cppc_get_dmi_max_khz(void)
return KHZ_PER_MHZ * mhz;
}
EXPORT_SYMBOL_GPL(cppc_get_dmi_max_khz);
/*
* If CPPC lowest_freq and nominal_freq registers are exposed then we can

View File

@@ -163,7 +163,6 @@ config CPU_FREQ_GOV_ONDEMAND
config CPU_FREQ_GOV_CONSERVATIVE
tristate "'conservative' cpufreq governor"
depends on CPU_FREQ
select CPU_FREQ_GOV_COMMON
help
'conservative' - this driver is rather similar to the 'ondemand'
@@ -188,7 +187,7 @@ config CPU_FREQ_GOV_CONSERVATIVE
config CPU_FREQ_GOV_SCHEDUTIL
bool "'schedutil' cpufreq policy governor"
depends on CPU_FREQ && SMP
depends on SMP
select CPU_FREQ_GOV_ATTR_SET
select IRQ_WORK
help
@@ -365,6 +364,6 @@ config ACPI_CPPC_CPUFREQ_FIE
If in doubt, say N.
endif
endif # CPU_FREQ
endmenu

View File

@@ -675,6 +675,29 @@ static inline u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
}
#endif
static void acpi_cpufreq_resolve_max_freq(struct cpufreq_policy *policy,
unsigned int pss_max_freq)
{
#ifdef CONFIG_ACPI_CPPC_LIB
u64 max_speed = cppc_get_dmi_max_khz();
/*
* Use DMI "Max Speed" if it looks plausible: must be
* above _PSS P0 frequency and within 2x of it.
*/
if (max_speed > pss_max_freq && max_speed < pss_max_freq * 2) {
policy->cpuinfo.max_freq = max_speed;
return;
}
#endif
/*
* If the maximum "boost" frequency is unknown, ask the arch
* scale-invariance code to use the "nominal" performance for
* CPU utilization scaling so as to prevent the schedutil
* governor from selecting inadequate CPU frequencies.
*/
arch_set_max_freq_ratio(true);
}
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
@@ -849,13 +872,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
} else {
/*
* If the maximum "boost" frequency is unknown, ask the arch
* scale-invariance code to use the "nominal" performance for
* CPU utilization scaling so as to prevent the schedutil
* governor from selecting inadequate CPU frequencies.
*/
arch_set_max_freq_ratio(true);
acpi_cpufreq_resolve_max_freq(policy, freq_table[0].frequency);
}
policy->freq_table = freq_table;

View File

@@ -769,8 +769,6 @@ static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
else if (policy->cpuinfo.max_freq > nominal_freq)
policy->cpuinfo.max_freq = nominal_freq;
policy->max = policy->cpuinfo.max_freq;
if (cppc_state == AMD_PSTATE_PASSIVE) {
ret = freq_qos_update_request(&cpudata->req[1], policy->cpuinfo.max_freq);
if (ret < 0)

View File

@@ -807,17 +807,11 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
int ret;
if (state)
policy->max = cppc_perf_to_khz(caps, caps->highest_perf);
policy->cpuinfo.max_freq = cppc_perf_to_khz(caps, caps->highest_perf);
else
policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
policy->cpuinfo.max_freq = policy->max;
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
if (ret < 0)
return ret;
policy->cpuinfo.max_freq = cppc_perf_to_khz(caps, caps->nominal_perf);
return 0;
}

View File

@@ -609,10 +609,19 @@ static int policy_set_boost(struct cpufreq_policy *policy, bool enable)
policy->boost_enabled = enable;
ret = cpufreq_driver->set_boost(policy, enable);
if (ret)
if (ret) {
policy->boost_enabled = !policy->boost_enabled;
return ret;
}
return ret;
ret = freq_qos_update_request(&policy->boost_freq_req, policy->cpuinfo.max_freq);
if (ret < 0) {
policy->boost_enabled = !policy->boost_enabled;
cpufreq_driver->set_boost(policy, policy->boost_enabled);
return ret;
}
return 0;
}
static ssize_t store_local_boost(struct cpufreq_policy *policy,
@@ -760,7 +769,7 @@ static ssize_t store_##file_name \
if (ret) \
return ret; \
\
ret = freq_qos_update_request(policy->object##_freq_req, val);\
ret = freq_qos_update_request(&policy->object##_freq_req, val); \
return ret >= 0 ? count : ret; \
}
@@ -1365,7 +1374,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
/* Cancel any pending policy->update work before freeing the policy. */
cancel_work_sync(&policy->update);
if (policy->max_freq_req) {
if (freq_qos_request_active(&policy->max_freq_req)) {
/*
* Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
* notification, since CPUFREQ_CREATE_POLICY notification was
@@ -1373,11 +1382,13 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
*/
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_REMOVE_POLICY, policy);
freq_qos_remove_request(policy->max_freq_req);
freq_qos_remove_request(&policy->max_freq_req);
}
freq_qos_remove_request(policy->min_freq_req);
kfree(policy->min_freq_req);
if (freq_qos_request_active(&policy->min_freq_req))
freq_qos_remove_request(&policy->min_freq_req);
if (freq_qos_request_active(&policy->boost_freq_req))
freq_qos_remove_request(&policy->boost_freq_req);
cpufreq_policy_put_kobj(policy);
free_cpumask_var(policy->real_cpus);
@@ -1447,47 +1458,29 @@ static int cpufreq_policy_online(struct cpufreq_policy *policy,
add_cpu_dev_symlink(policy, j, get_cpu_device(j));
}
policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
GFP_KERNEL);
if (!policy->min_freq_req) {
ret = -ENOMEM;
goto out_destroy_policy;
if (policy->boost_supported) {
ret = freq_qos_add_request(&policy->constraints,
&policy->boost_freq_req,
FREQ_QOS_MAX,
policy->cpuinfo.max_freq);
if (ret < 0)
goto out_destroy_policy;
}
ret = freq_qos_add_request(&policy->constraints,
policy->min_freq_req, FREQ_QOS_MIN,
&policy->min_freq_req, FREQ_QOS_MIN,
FREQ_QOS_MIN_DEFAULT_VALUE);
if (ret < 0) {
/*
* So we don't call freq_qos_remove_request() for an
* uninitialized request.
*/
kfree(policy->min_freq_req);
policy->min_freq_req = NULL;
if (ret < 0)
goto out_destroy_policy;
}
/*
* This must be initialized right here to avoid calling
* freq_qos_remove_request() on uninitialized request in case
* of errors.
*/
policy->max_freq_req = policy->min_freq_req + 1;
ret = freq_qos_add_request(&policy->constraints,
policy->max_freq_req, FREQ_QOS_MAX,
&policy->max_freq_req, FREQ_QOS_MAX,
FREQ_QOS_MAX_DEFAULT_VALUE);
if (ret < 0) {
policy->max_freq_req = NULL;
if (ret < 0)
goto out_destroy_policy;
}
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
} else {
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
if (ret < 0)
goto out_destroy_policy;
}
if (cpufreq_driver->get && has_target()) {
@@ -2364,8 +2357,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
target_freq = __resolve_freq(policy, target_freq, policy->min,
policy->max, relation);
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
pr_debug("CPU %u: cur %u kHz -> target %u kHz (req %u kHz, rel %u)\n",
policy->cpu, policy->cur, target_freq, old_target_freq, relation);
/*
* This might look like a redundant call as we are checking it again
@@ -2789,16 +2782,10 @@ int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
return -ENXIO;
ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret) {
if (ret)
pr_err("%s: Policy frequency update failed\n", __func__);
return ret;
}
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
if (ret < 0)
return ret;
return 0;
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_set_sw);

View File

@@ -21,6 +21,7 @@
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
/* Ondemand Sampling types */
enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
@@ -57,7 +58,7 @@ static ssize_t file_name##_show \
{ \
struct dbs_data *dbs_data = to_dbs_data(attr_set); \
struct _gov##_dbs_tuners *tuners = dbs_data->tuners; \
return sprintf(buf, "%u\n", tuners->file_name); \
return sysfs_emit(buf, "%u\n", tuners->file_name); \
}
#define gov_show_one_common(file_name) \
@@ -65,7 +66,7 @@ static ssize_t file_name##_show \
(struct gov_attr_set *attr_set, char *buf) \
{ \
struct dbs_data *dbs_data = to_dbs_data(attr_set); \
return sprintf(buf, "%u\n", dbs_data->file_name); \
return sysfs_emit(buf, "%u\n", dbs_data->file_name); \
}
#define gov_attr_ro(_name) \

View File

@@ -3472,7 +3472,7 @@ static int intel_pstate_update_status(const char *buf, size_t size)
{
if (size == 3 && !strncmp(buf, "off", size)) {
if (!intel_pstate_driver)
return -EINVAL;
return 0;
if (hwp_active)
return -EBUSY;

View File

@@ -156,6 +156,7 @@ extern int cppc_set_enable(int cpu, bool enable);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
extern bool cppc_perf_ctrs_in_pcc_cpu(unsigned int cpu);
extern bool cppc_perf_ctrs_in_pcc(void);
extern u64 cppc_get_dmi_max_khz(void);
extern unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf);
extern unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq);
extern bool acpi_cpc_valid(void);

View File

@@ -79,8 +79,9 @@ struct cpufreq_policy {
* called, but you're in IRQ context */
struct freq_constraints constraints;
struct freq_qos_request *min_freq_req;
struct freq_qos_request *max_freq_req;
struct freq_qos_request min_freq_req;
struct freq_qos_request max_freq_req;
struct freq_qos_request boost_freq_req;
struct cpufreq_frequency_table *freq_table;
enum cpufreq_table_sorting freq_table_sorted;
@@ -232,7 +233,7 @@ static inline bool policy_is_inactive(struct cpufreq_policy *policy)
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
return cpumask_weight(policy->cpus) > 1;
return cpumask_nth(1, policy->cpus) < nr_cpumask_bits;
}
#ifdef CONFIG_CPU_FREQ