Merge back cpufreq material for 6.16

Rafael J. Wysocki
2025-05-04 12:35:42 +02:00
4 changed files with 215 additions and 288 deletions


@@ -808,19 +808,16 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
}
static void amd_pstate_update_limits(unsigned int cpu)
static void amd_pstate_update_limits(struct cpufreq_policy *policy)
{
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata;
u32 prev_high = 0, cur_high = 0;
bool highest_perf_changed = false;
unsigned int cpu = policy->cpu;
if (!amd_pstate_prefcore)
return;
if (!policy)
return;
if (amd_get_highest_perf(cpu, &cur_high))
return;


@@ -255,51 +255,6 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
/**
* cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
* @policy: cpufreq policy returned by cpufreq_cpu_acquire().
*/
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
if (WARN_ON(!policy))
return;
lockdep_assert_held(&policy->rwsem);
up_write(&policy->rwsem);
cpufreq_cpu_put(policy);
}
/**
* cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
* @cpu: CPU to find the policy for.
*
* Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
* if the policy returned by it is not NULL, acquire its rwsem for writing.
* Return the policy if it is active or release it and return NULL otherwise.
*
* The policy returned by this function has to be released with the help of
* cpufreq_cpu_release() in order to release its rwsem and balance its usage
* counter properly.
*/
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
if (!policy)
return NULL;
down_write(&policy->rwsem);
if (policy_is_inactive(policy)) {
cpufreq_cpu_release(policy);
return NULL;
}
return policy;
}
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
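The helpers removed above paired a reference count (cpufreq_cpu_get()) with write-locking policy->rwsem. The callers converted later in this commit, for example cpufreq_update_policy(), get the same effect from cpufreq_cpu_get() under the __free(put_cpufreq_policy) cleanup attribute combined with the new cpufreq_policy_write guard. A minimal sketch of the resulting pattern (the function name is made up for illustration):

static void example_refresh_limits(unsigned int cpu)
{
	/* The reference is dropped automatically when @policy leaves scope. */
	struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);

	if (!policy)
		return;

	/* policy->rwsem is taken for writing here and released on return. */
	guard(cpufreq_policy_write)(policy);

	refresh_frequency_limits(policy);
}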
@@ -636,6 +591,22 @@ static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
return sysfs_emit(buf, "%d\n", policy->boost_enabled);
}
static int policy_set_boost(struct cpufreq_policy *policy, bool enable)
{
int ret;
if (policy->boost_enabled == enable)
return 0;
policy->boost_enabled = enable;
ret = cpufreq_driver->set_boost(policy, enable);
if (ret)
policy->boost_enabled = !policy->boost_enabled;
return ret;
}
static ssize_t store_local_boost(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
@@ -651,21 +622,14 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
if (!policy->boost_supported)
return -EINVAL;
if (policy->boost_enabled == enable)
return count;
policy->boost_enabled = enable;
cpus_read_lock();
ret = cpufreq_driver->set_boost(policy, enable);
ret = policy_set_boost(policy, enable);
cpus_read_unlock();
if (ret) {
policy->boost_enabled = !policy->boost_enabled;
return ret;
}
if (!ret)
return count;
return count;
return ret;
}
static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
@@ -1025,17 +989,16 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EBUSY;
if (!fattr->show)
return -EIO;
down_read(&policy->rwsem);
if (likely(!policy_is_inactive(policy)))
ret = fattr->show(policy, buf);
up_read(&policy->rwsem);
guard(cpufreq_policy_read)(policy);
return ret;
if (likely(!policy_is_inactive(policy)))
return fattr->show(policy, buf);
return -EBUSY;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
@@ -1043,17 +1006,16 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EBUSY;
if (!fattr->store)
return -EIO;
down_write(&policy->rwsem);
if (likely(!policy_is_inactive(policy)))
ret = fattr->store(policy, buf, count);
up_write(&policy->rwsem);
guard(cpufreq_policy_write)(policy);
return ret;
if (likely(!policy_is_inactive(policy)))
return fattr->store(policy, buf, count);
return -EBUSY;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
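In the rewritten show() and store() above, the guards (defined in the header changes at the end of this diff) take policy->rwsem when declared and release it automatically on every return path, which is why the fattr->show()/->store() results can now be returned directly instead of being stored in a local and unlocked by hand. A minimal illustration of the read-side semantics (not code from this commit):

static unsigned int example_read_cur(struct cpufreq_policy *policy)
{
	guard(cpufreq_policy_read)(policy);	/* down_read(&policy->rwsem) */

	if (policy_is_inactive(policy))
		return 0;			/* up_read() runs here... */

	return policy->cur;			/* ...and here, automatically */
}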
@@ -1211,7 +1173,8 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
if (cpumask_test_cpu(cpu, policy->cpus))
return 0;
down_write(&policy->rwsem);
guard(cpufreq_policy_write)(policy);
if (has_target())
cpufreq_stop_governor(policy);
@@ -1222,7 +1185,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
if (ret)
pr_err("%s: Failed to start governor\n", __func__);
}
up_write(&policy->rwsem);
return ret;
}
@@ -1242,9 +1205,10 @@ static void handle_update(struct work_struct *work)
container_of(work, struct cpufreq_policy, update);
pr_debug("handle_update for cpu %u called\n", policy->cpu);
down_write(&policy->rwsem);
guard(cpufreq_policy_write)(policy);
refresh_frequency_limits(policy);
up_write(&policy->rwsem);
}
static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
@@ -1270,11 +1234,11 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
struct kobject *kobj;
struct completion *cmp;
down_write(&policy->rwsem);
cpufreq_stats_free_table(policy);
kobj = &policy->kobj;
cmp = &policy->kobj_unregister;
up_write(&policy->rwsem);
scoped_guard(cpufreq_policy_write, policy) {
cpufreq_stats_free_table(policy);
kobj = &policy->kobj;
cmp = &policy->kobj_unregister;
}
kobject_put(kobj);
/*
@@ -1350,7 +1314,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
init_waitqueue_head(&policy->transition_wait);
INIT_WORK(&policy->update, handle_update);
policy->cpu = cpu;
return policy;
err_min_qos_notifier:
@@ -1419,35 +1382,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
kfree(policy);
}
static int cpufreq_online(unsigned int cpu)
static int cpufreq_policy_online(struct cpufreq_policy *policy,
unsigned int cpu, bool new_policy)
{
struct cpufreq_policy *policy;
bool new_policy;
unsigned long flags;
unsigned int j;
int ret;
pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
guard(cpufreq_policy_write)(policy);
/* Check if this CPU already has a policy to manage it */
policy = per_cpu(cpufreq_cpu_data, cpu);
if (policy) {
WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
if (!policy_is_inactive(policy))
return cpufreq_add_policy_cpu(policy, cpu);
/* This is the only online CPU for the policy. Start over. */
new_policy = false;
down_write(&policy->rwsem);
policy->cpu = cpu;
policy->governor = NULL;
} else {
new_policy = true;
policy = cpufreq_policy_alloc(cpu);
if (!policy)
return -ENOMEM;
down_write(&policy->rwsem);
}
policy->cpu = cpu;
policy->governor = NULL;
if (!new_policy && cpufreq_driver->online) {
/* Recover policy->cpus using related_cpus */
@@ -1470,7 +1415,7 @@ static int cpufreq_online(unsigned int cpu)
if (ret) {
pr_debug("%s: %d: initialization failed\n", __func__,
__LINE__);
goto out_free_policy;
goto out_clear_policy;
}
/*
@@ -1621,33 +1566,6 @@ static int cpufreq_online(unsigned int cpu)
goto out_destroy_policy;
}
up_write(&policy->rwsem);
kobject_uevent(&policy->kobj, KOBJ_ADD);
/* Callback for handling stuff after policy is ready */
if (cpufreq_driver->ready)
cpufreq_driver->ready(policy);
/* Register cpufreq cooling only for a new policy */
if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
if (cpufreq_driver->set_boost && policy->boost_supported &&
policy->boost_enabled != cpufreq_boost_enabled()) {
policy->boost_enabled = cpufreq_boost_enabled();
ret = cpufreq_driver->set_boost(policy, policy->boost_enabled);
if (ret) {
/* If the set_boost fails, the online operation is not affected */
pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
str_enable_disable(policy->boost_enabled));
policy->boost_enabled = !policy->boost_enabled;
}
}
pr_debug("initialization complete\n");
return 0;
out_destroy_policy:
@@ -1662,14 +1580,72 @@ static int cpufreq_online(unsigned int cpu)
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
out_free_policy:
out_clear_policy:
cpumask_clear(policy->cpus);
up_write(&policy->rwsem);
cpufreq_policy_free(policy);
return ret;
}
static int cpufreq_online(unsigned int cpu)
{
struct cpufreq_policy *policy;
bool new_policy;
int ret;
pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
/* Check if this CPU already has a policy to manage it */
policy = per_cpu(cpufreq_cpu_data, cpu);
if (policy) {
WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
if (!policy_is_inactive(policy))
return cpufreq_add_policy_cpu(policy, cpu);
/* This is the only online CPU for the policy. Start over. */
new_policy = false;
} else {
new_policy = true;
policy = cpufreq_policy_alloc(cpu);
if (!policy)
return -ENOMEM;
}
ret = cpufreq_policy_online(policy, cpu, new_policy);
if (ret) {
cpufreq_policy_free(policy);
return ret;
}
kobject_uevent(&policy->kobj, KOBJ_ADD);
/* Callback for handling stuff after policy is ready */
if (cpufreq_driver->ready)
cpufreq_driver->ready(policy);
/* Register cpufreq cooling only for a new policy */
if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
/*
* Let the per-policy boost flag mirror the cpufreq_driver boost during
* initialization for a new policy. For an existing policy, maintain the
* previous boost value unless global boost is disabled.
*/
if (cpufreq_driver->set_boost && policy->boost_supported &&
(new_policy || !cpufreq_boost_enabled())) {
ret = policy_set_boost(policy, cpufreq_boost_enabled());
if (ret) {
/* If the set_boost fails, the online operation is not affected */
pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
str_enable_disable(cpufreq_boost_enabled()));
}
}
pr_debug("initialization complete\n");
return 0;
}
/**
* cpufreq_add_dev - the cpufreq interface for a CPU device.
* @dev: CPU device.
@@ -1757,11 +1733,10 @@ static int cpufreq_offline(unsigned int cpu)
return 0;
}
down_write(&policy->rwsem);
guard(cpufreq_policy_write)(policy);
__cpufreq_offline(cpu, policy);
up_write(&policy->rwsem);
return 0;
}
@@ -1778,33 +1753,29 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (!policy)
return;
down_write(&policy->rwsem);
scoped_guard(cpufreq_policy_write, policy) {
if (cpu_online(cpu))
__cpufreq_offline(cpu, policy);
if (cpu_online(cpu))
__cpufreq_offline(cpu, policy);
remove_cpu_dev_symlink(policy, cpu, dev);
remove_cpu_dev_symlink(policy, cpu, dev);
if (!cpumask_empty(policy->real_cpus))
return;
if (!cpumask_empty(policy->real_cpus)) {
up_write(&policy->rwsem);
return;
/*
* Unregister cpufreq cooling once all the CPUs of the policy
* are removed.
*/
if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
cpufreq_cooling_unregister(policy->cdev);
policy->cdev = NULL;
}
/* We did light-weight exit earlier, do full tear down now */
if (cpufreq_driver->offline && cpufreq_driver->exit)
cpufreq_driver->exit(policy);
}
/*
* Unregister cpufreq cooling once all the CPUs of the policy are
* removed.
*/
if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
cpufreq_cooling_unregister(policy->cdev);
policy->cdev = NULL;
}
/* We did light-weight exit earlier, do full tear down now */
if (cpufreq_driver->offline && cpufreq_driver->exit)
cpufreq_driver->exit(policy);
up_write(&policy->rwsem);
cpufreq_policy_free(policy);
}
@@ -1874,27 +1845,26 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
struct cpufreq_policy *policy;
unsigned int ret_freq = 0;
struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
unsigned long flags;
read_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
ret_freq = cpufreq_driver->get(cpu);
unsigned int ret_freq = cpufreq_driver->get(cpu);
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
return ret_freq;
}
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
policy = cpufreq_cpu_get(cpu);
if (policy) {
ret_freq = policy->cur;
cpufreq_cpu_put(policy);
}
if (policy)
return policy->cur;
return ret_freq;
return 0;
}
EXPORT_SYMBOL(cpufreq_quick_get);
@@ -1906,15 +1876,13 @@ EXPORT_SYMBOL(cpufreq_quick_get);
*/
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
unsigned int ret_freq = 0;
struct cpufreq_policy *policy __free(put_cpufreq_policy);
if (policy) {
ret_freq = policy->max;
cpufreq_cpu_put(policy);
}
policy = cpufreq_cpu_get(cpu);
if (policy)
return policy->max;
return ret_freq;
return 0;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
@@ -1926,15 +1894,13 @@ EXPORT_SYMBOL(cpufreq_quick_get_max);
*/
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
unsigned int ret_freq = 0;
struct cpufreq_policy *policy __free(put_cpufreq_policy);
if (policy) {
ret_freq = policy->cpuinfo.max_freq;
cpufreq_cpu_put(policy);
}
policy = cpufreq_cpu_get(cpu);
if (policy)
return policy->cpuinfo.max_freq;
return ret_freq;
return 0;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
@@ -1954,19 +1920,18 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
unsigned int ret_freq = 0;
struct cpufreq_policy *policy __free(put_cpufreq_policy);
if (policy) {
down_read(&policy->rwsem);
if (cpufreq_driver->get)
ret_freq = __cpufreq_get(policy);
up_read(&policy->rwsem);
policy = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
cpufreq_cpu_put(policy);
}
guard(cpufreq_policy_read)(policy);
return ret_freq;
if (cpufreq_driver->get)
return __cpufreq_get(policy);
return 0;
}
EXPORT_SYMBOL(cpufreq_get);
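The getters above rely on the put_cpufreq_policy cleanup, which is not part of this diff; it is presumably declared along the following lines elsewhere in the cpufreq header, so that the reference taken by cpufreq_cpu_get() is dropped automatically when the pointer goes out of scope (a NULL pointer is simply skipped):

/* Assumed definition, shown only to explain the __free() usage above. */
DEFINE_FREE(put_cpufreq_policy, struct cpufreq_policy *,
	    if (_T) cpufreq_cpu_put(_T))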
@@ -2025,9 +1990,9 @@ void cpufreq_suspend(void)
for_each_active_policy(policy) {
if (has_target()) {
down_write(&policy->rwsem);
cpufreq_stop_governor(policy);
up_write(&policy->rwsem);
scoped_guard(cpufreq_policy_write, policy) {
cpufreq_stop_governor(policy);
}
}
if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
@@ -2068,9 +2033,9 @@ void cpufreq_resume(void)
pr_err("%s: Failed to resume driver: %s\n", __func__,
cpufreq_driver->name);
} else if (has_target()) {
down_write(&policy->rwsem);
ret = cpufreq_start_governor(policy);
up_write(&policy->rwsem);
scoped_guard(cpufreq_policy_write, policy) {
ret = cpufreq_start_governor(policy);
}
if (ret)
pr_err("%s: Failed to start governor for CPU%u's policy\n",
@@ -2438,15 +2403,9 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
int ret;
guard(cpufreq_policy_write)(policy);
down_write(&policy->rwsem);
ret = __cpufreq_driver_target(policy, target_freq, relation);
up_write(&policy->rwsem);
return ret;
return __cpufreq_driver_target(policy, target_freq, relation);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
@@ -2618,31 +2577,6 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
* POLICY INTERFACE *
*********************************************************************/
/**
* cpufreq_get_policy - get the current cpufreq_policy
* @policy: struct cpufreq_policy into which the current cpufreq_policy
* is written
* @cpu: CPU to find the policy for
*
* Reads the current cpufreq policy.
*/
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
struct cpufreq_policy *cpu_policy;
if (!policy)
return -EINVAL;
cpu_policy = cpufreq_cpu_get(cpu);
if (!cpu_policy)
return -EINVAL;
memcpy(policy, cpu_policy, sizeof(*policy));
cpufreq_cpu_put(cpu_policy);
return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
/**
@@ -2793,6 +2727,21 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
return ret;
}
static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
{
guard(cpufreq_policy_write)(policy);
/*
* BIOS might change freq behind our back
* -> ask driver for current freq and notify governors about a change
*/
if (cpufreq_driver->get && has_target() &&
(cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
return;
refresh_frequency_limits(policy);
}
/**
* cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
* @cpu: CPU to re-evaluate the policy for.
@@ -2804,23 +2753,13 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
*/
void cpufreq_update_policy(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
struct cpufreq_policy *policy __free(put_cpufreq_policy);
policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
/*
* BIOS might change freq behind our back
* -> ask driver for current freq and notify governors about a change
*/
if (cpufreq_driver->get && has_target() &&
(cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
goto unlock;
refresh_frequency_limits(policy);
unlock:
cpufreq_cpu_release(policy);
cpufreq_policy_refresh(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
@@ -2829,7 +2768,7 @@ EXPORT_SYMBOL(cpufreq_update_policy);
* @cpu: CPU to update the policy limits for.
*
* Invoke the driver's ->update_limits callback if present or call
* cpufreq_update_policy() for @cpu.
* cpufreq_policy_refresh() for @cpu.
*/
void cpufreq_update_limits(unsigned int cpu)
{
@@ -2840,9 +2779,9 @@ void cpufreq_update_limits(unsigned int cpu)
return;
if (cpufreq_driver->update_limits)
cpufreq_driver->update_limits(cpu);
cpufreq_driver->update_limits(policy);
else
cpufreq_update_policy(cpu);
cpufreq_policy_refresh(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
@@ -2876,8 +2815,10 @@ static int cpufreq_boost_trigger_state(int state)
unsigned long flags;
int ret = 0;
if (cpufreq_driver->boost_enabled == state)
return 0;
/*
* Don't compare 'cpufreq_driver->boost_enabled' with 'state' here to
* make sure all policies are in sync with global boost flag.
*/
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver->boost_enabled = state;
@@ -2888,12 +2829,9 @@ static int cpufreq_boost_trigger_state(int state)
if (!policy->boost_supported)
continue;
policy->boost_enabled = state;
ret = cpufreq_driver->set_boost(policy, state);
if (ret) {
policy->boost_enabled = !policy->boost_enabled;
ret = policy_set_boost(policy, state);
if (ret)
goto err_reset_state;
}
}
cpus_read_unlock();


@@ -1356,9 +1356,11 @@ static void intel_pstate_update_policies(void)
cpufreq_update_policy(cpu);
}
static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
struct cpufreq_policy *policy)
static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy,
struct cpudata *cpudata)
{
guard(cpufreq_policy_write)(policy);
if (hwp_active)
intel_pstate_get_hwp_cap(cpudata);
@@ -1368,42 +1370,34 @@ static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
refresh_frequency_limits(policy);
}
static void intel_pstate_update_limits(unsigned int cpu)
static bool intel_pstate_update_max_freq(struct cpudata *cpudata)
{
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
struct cpudata *cpudata;
struct cpufreq_policy *policy __free(put_cpufreq_policy);
policy = cpufreq_cpu_get(cpudata->cpu);
if (!policy)
return;
return false;
cpudata = all_cpu_data[cpu];
__intel_pstate_update_max_freq(policy, cpudata);
__intel_pstate_update_max_freq(cpudata, policy);
return true;
}
/* Prevent the driver from being unregistered now. */
mutex_lock(&intel_pstate_driver_lock);
static void intel_pstate_update_limits(struct cpufreq_policy *policy)
{
struct cpudata *cpudata = all_cpu_data[policy->cpu];
cpufreq_cpu_release(policy);
__intel_pstate_update_max_freq(policy, cpudata);
hybrid_update_capacity(cpudata);
mutex_unlock(&intel_pstate_driver_lock);
}
static void intel_pstate_update_limits_for_all(void)
{
int cpu;
for_each_possible_cpu(cpu) {
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
if (!policy)
continue;
__intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
cpufreq_cpu_release(policy);
}
for_each_possible_cpu(cpu)
intel_pstate_update_max_freq(all_cpu_data[cpu]);
mutex_lock(&hybrid_capacity_lock);
@@ -1843,13 +1837,8 @@ static void intel_pstate_notify_work(struct work_struct *work)
{
struct cpudata *cpudata =
container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
if (policy) {
__intel_pstate_update_max_freq(cpudata, policy);
cpufreq_cpu_release(policy);
if (intel_pstate_update_max_freq(cpudata)) {
/*
* The driver will not be unregistered while this function is
* running, so update the capacity without acquiring the driver


@@ -170,6 +170,12 @@ struct cpufreq_policy {
struct notifier_block nb_max;
};
DEFINE_GUARD(cpufreq_policy_write, struct cpufreq_policy *,
down_write(&_T->rwsem), up_write(&_T->rwsem))
DEFINE_GUARD(cpufreq_policy_read, struct cpufreq_policy *,
down_read(&_T->rwsem), up_read(&_T->rwsem))
/*
* Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
* callback for sanitization. That callback is only expected to modify the min
@@ -235,9 +241,6 @@ void disable_cpufreq(void);
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
void cpufreq_cpu_release(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
@@ -395,7 +398,7 @@ struct cpufreq_driver {
unsigned int (*get)(unsigned int cpu);
/* Called to update policy limits on firmware notifications. */
void (*update_limits)(unsigned int cpu);
void (*update_limits)(struct cpufreq_policy *policy);
/* optional */
int (*bios_limit)(int cpu, unsigned int *limit);
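With the prototype change above, the cpufreq core looks up and reference-counts the policy before invoking the callback (see cpufreq_update_limits() earlier in this diff), so a driver only has to act on the policy it is handed. A hypothetical implementation under the new prototype (driver and function names are invented for illustration):

static void exampledrv_update_limits(struct cpufreq_policy *policy)
{
	/*
	 * The core already holds a reference on @policy while this runs, so
	 * no cpufreq_cpu_get()/cpufreq_cpu_put() is needed here; the CPU
	 * number is still available as policy->cpu.
	 */
	pr_debug("re-reading limits for CPU%u\n", policy->cpu);
}

static struct cpufreq_driver exampledrv_driver = {
	.name		= "exampledrv",
	.update_limits	= exampledrv_update_limits,
	/* remaining callbacks omitted from this sketch */
};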