mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Merge back cpufreq material for v6.11.
--- a/Documentation/admin-guide/pm/amd-pstate.rst
+++ b/Documentation/admin-guide/pm/amd-pstate.rst
@@ -406,7 +406,7 @@ control its functionality at the system level. They are located in the
 ``/sys/devices/system/cpu/amd_pstate/`` directory and affect all CPUs.
 
 ``status``
-	Operation mode of the driver: "active", "passive" or "disable".
+	Operation mode of the driver: "active", "passive", "guided" or "disable".
 
 "active"
	The driver is functional and in the ``active mode``
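As a quick illustration of the attribute this documentation hunk touches, the operation mode can be read back from userspace. A minimal sketch, assuming only the sysfs path documented above (the program itself is illustrative, not part of the commit):

#include <stdio.h>

int main(void)
{
	char mode[32] = "";
	FILE *f = fopen("/sys/devices/system/cpu/amd_pstate/status", "r");

	if (!f) {
		perror("amd_pstate status");
		return 1;
	}
	if (fgets(mode, sizeof(mode), f))
		printf("amd-pstate mode: %s", mode);	/* e.g. "guided" after this change */
	fclose(f);
	return 0;
}

Per this documentation file, writing one of the listed strings ("active", "passive", "guided", "disable") back to the same attribute switches the driver mode.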
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -470,6 +470,7 @@
 #define X86_FEATURE_BHI_CTRL		(21*32+ 2) /* "" BHI_DIS_S HW control available */
 #define X86_FEATURE_CLEAR_BHB_HW	(21*32+ 3) /* "" BHI_DIS_S HW control enabled */
 #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
+#define X86_FEATURE_FAST_CPPC		(21*32 + 5) /* "" AMD Fast CPPC */
 
 /*
  * BUG word(s)
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -45,6 +45,7 @@ static const struct cpuid_bit cpuid_bits[] = {
	{ X86_FEATURE_HW_PSTATE,	CPUID_EDX,  7, 0x80000007, 0 },
	{ X86_FEATURE_CPB,		CPUID_EDX,  9, 0x80000007, 0 },
	{ X86_FEATURE_PROC_FEEDBACK,	CPUID_EDX, 11, 0x80000007, 0 },
+	{ X86_FEATURE_FAST_CPPC,	CPUID_EDX, 15, 0x80000007, 0 },
	{ X86_FEATURE_MBA,		CPUID_EBX,  6, 0x80000008, 0 },
	{ X86_FEATURE_SMBA,		CPUID_EBX,  2, 0x80000020, 0 },
	{ X86_FEATURE_BMEC,		CPUID_EBX,  3, 0x80000020, 0 },
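The scattered-bits entry above places X86_FEATURE_FAST_CPPC at bit 15 of EDX in CPUID leaf 0x80000007. A hedged userspace probe of that same bit (the leaf and bit number come from the hunk; the program is illustrative, not kernel code):

#include <stdio.h>
#include <cpuid.h>	/* GCC/Clang helper for the CPUID instruction */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Extended leaf 0x80000007: power-management information on AMD */
	if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("FAST_CPPC (EDX bit 15): %s\n", (edx & (1u << 15)) ? "set" : "clear");
	return 0;
}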
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -71,6 +71,7 @@ config X86_AMD_PSTATE_DEFAULT_MODE
 config X86_AMD_PSTATE_UT
	tristate "selftest for AMD Processor P-State driver"
	depends on X86 && ACPI_PROCESSOR
+	depends on X86_AMD_PSTATE
	default n
	help
	  This kernel module is used for testing. It's safe to say M here.
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -51,6 +51,7 @@
 
 #define AMD_PSTATE_TRANSITION_LATENCY	20000
 #define AMD_PSTATE_TRANSITION_DELAY	1000
+#define AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY 600
 #define CPPC_HIGHEST_PERF_PERFORMANCE	196
 #define CPPC_HIGHEST_PERF_DEFAULT	166
@@ -85,15 +86,6 @@ struct quirk_entry {
	u32 lowest_freq;
 };
 
-/*
- * TODO: We need more time to fine tune processors with shared memory solution
- * with community together.
- *
- * There are some performance drops on the CPU benchmarks which reports from
- * Suse. We are co-working with them to fine tune the shared memory solution. So
- * we disable it by default to go acpi-cpufreq on these processors and add a
- * module parameter to be able to enable it manually for debugging.
- */
 static struct cpufreq_driver *current_pstate_driver;
 static struct cpufreq_driver amd_pstate_driver;
 static struct cpufreq_driver amd_pstate_epp_driver;
@@ -157,7 +149,7 @@ static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
	 * broken BIOS lack of nominal_freq and lowest_freq capabilities
	 * definition in ACPI tables
	 */
-	if (boot_cpu_has(X86_FEATURE_ZEN2)) {
+	if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
		quirks = dmi->driver_data;
		pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
		return 1;
@@ -199,7 +191,7 @@ static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
	u64 epp;
	int ret;
 
-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		if (!cppc_req_cached) {
			epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
					    &cppc_req_cached);
@@ -252,7 +244,7 @@ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
	int ret;
	struct cppc_perf_ctrls perf_ctrls;
 
-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		u64 value = READ_ONCE(cpudata->cppc_req_cached);
 
		value &= ~GENMASK_ULL(31, 24);
@@ -281,10 +273,8 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
	int epp = -EINVAL;
	int ret;
 
-	if (!pref_index) {
-		pr_debug("EPP pref_index is invalid\n");
-		return -EINVAL;
-	}
+	if (!pref_index)
+		epp = cpudata->epp_default;
 
	if (epp == -EINVAL)
		epp = epp_values[pref_index];
@@ -521,6 +511,8 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
 static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
			      u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
 {
+	unsigned long max_freq;
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
	u64 prev = READ_ONCE(cpudata->cppc_req_cached);
	u64 value = prev;
 
@@ -530,6 +522,9 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
		       cpudata->max_limit_perf);
	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
 
+	max_freq = READ_ONCE(cpudata->max_limit_freq);
+	policy->cur = div_u64(des_perf * max_freq, max_perf);
+
	if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
		min_perf = des_perf;
		des_perf = 0;
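The two added lines estimate the current frequency from the clamped desired perf: cur = des_perf * max_freq / max_perf. A standalone sketch of that arithmetic with made-up values (plain 64-bit division stands in for the kernel's div_u64() helper):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t des_perf = 83, max_perf = 166;	/* hypothetical perf levels */
	uint64_t max_freq = 3990000;		/* kHz, hypothetical limit */
	uint64_t cur = des_perf * max_freq / max_perf;

	/* Half the perf cap maps to half the frequency cap: 1995000 kHz */
	printf("policy->cur = %llu kHz\n", (unsigned long long)cur);
	return 0;
}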
@@ -651,10 +646,9 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
				   unsigned long capacity)
 {
	unsigned long max_perf, min_perf, des_perf,
-		      cap_perf, lowest_nonlinear_perf, max_freq;
+		      cap_perf, lowest_nonlinear_perf;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct amd_cpudata *cpudata = policy->driver_data;
-	unsigned int target_freq;
 
	if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
		amd_pstate_update_min_max_limit(policy);
@@ -662,7 +656,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
 
	cap_perf = READ_ONCE(cpudata->highest_perf);
	lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
-	max_freq = READ_ONCE(cpudata->max_freq);
 
	des_perf = cap_perf;
	if (target_perf < capacity)
@@ -680,8 +673,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
		max_perf = min_perf;
 
	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
-	target_freq = div_u64(des_perf * max_freq, max_perf);
-	policy->cur = target_freq;
 
	amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
			policy->governor->flags);
@@ -753,7 +744,7 @@ static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
 {
	int ret;
 
-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		u64 cap1;
 
		ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
@@ -849,8 +840,12 @@ static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
	u32 transition_delay_ns;
 
	transition_delay_ns = cppc_get_transition_latency(cpu);
-	if (transition_delay_ns == CPUFREQ_ETERNAL)
-		return AMD_PSTATE_TRANSITION_DELAY;
+	if (transition_delay_ns == CPUFREQ_ETERNAL) {
+		if (cpu_feature_enabled(X86_FEATURE_FAST_CPPC))
+			return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
+		else
+			return AMD_PSTATE_TRANSITION_DELAY;
+	}
 
	return transition_delay_ns / NSEC_PER_USEC;
 }
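A standalone sketch of the fallback logic above, using the delay constants from the defines hunk earlier in this diff; CPUFREQ_ETERNAL's concrete value and the has_fast_cppc flag are stand-ins for the kernel's versions:

#include <stdio.h>
#include <stdint.h>

#define AMD_PSTATE_TRANSITION_DELAY		1000	/* us, from the diff */
#define AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY	600	/* us, from the diff */
#define CPUFREQ_ETERNAL		((uint32_t)-1)		/* stand-in value */
#define NSEC_PER_USEC		1000

static uint32_t transition_delay_us(uint32_t delay_ns, int has_fast_cppc)
{
	if (delay_ns == CPUFREQ_ETERNAL)	/* firmware reported no latency */
		return has_fast_cppc ? AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY
				     : AMD_PSTATE_TRANSITION_DELAY;
	return delay_ns / NSEC_PER_USEC;	/* ns -> us, as in the hunk */
}

int main(void)
{
	printf("%u\n", transition_delay_us(CPUFREQ_ETERNAL, 1));	/* 600 */
	printf("%u\n", transition_delay_us(20000, 0));			/* 20 */
	return 0;
}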
@@ -921,12 +916,30 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
	WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
	WRITE_ONCE(cpudata->max_freq, max_freq);
 
+	/**
+	 * Below values need to be initialized correctly, otherwise driver will fail to load
+	 * max_freq is calculated according to (nominal_freq * highest_perf)/nominal_perf
+	 * lowest_nonlinear_freq is a value between [min_freq, nominal_freq]
+	 * Check _CPC in ACPI table objects if any values are incorrect
+	 */
+	if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) {
+		pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n",
+			min_freq, max_freq, nominal_freq * 1000);
+		return -EINVAL;
+	}
+
+	if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq * 1000) {
+		pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n",
+			lowest_nonlinear_freq, min_freq, nominal_freq * 1000);
+		return -EINVAL;
+	}
+
	return 0;
 }
 
 static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 {
-	int min_freq, max_freq, nominal_freq, ret;
+	int min_freq, max_freq, ret;
	struct device *dev;
	struct amd_cpudata *cpudata;
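The comment added above gives max_freq = (nominal_freq * highest_perf) / nominal_perf. A quick worked example with made-up CPPC numbers shows the shape of the result:

#include <stdio.h>

int main(void)
{
	unsigned int nominal_freq = 2800;	/* MHz, hypothetical */
	unsigned int highest_perf = 228;	/* hypothetical */
	unsigned int nominal_perf = 160;	/* hypothetical */

	/* 2800 * 228 / 160 = 3990 MHz, i.e. the boost ceiling */
	printf("max_freq = %u MHz\n", nominal_freq * highest_perf / nominal_perf);
	return 0;
}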
@@ -957,16 +970,6 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 
	min_freq = READ_ONCE(cpudata->min_freq);
	max_freq = READ_ONCE(cpudata->max_freq);
-	nominal_freq = READ_ONCE(cpudata->nominal_freq);
-
-	if (min_freq <= 0 || max_freq <= 0 ||
-	    nominal_freq <= 0 || min_freq > max_freq) {
-		dev_err(dev,
-			"min_freq(%d) or max_freq(%d) or nominal_freq (%d) value is incorrect, check _CPC in ACPI tables\n",
-			min_freq, max_freq, nominal_freq);
-		ret = -EINVAL;
-		goto free_cpudata1;
-	}
 
	policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
	policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
@@ -980,7 +983,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
	/* It will be updated by governor */
	policy->cur = policy->cpuinfo.min_freq;
 
-	if (boot_cpu_has(X86_FEATURE_CPPC))
+	if (cpu_feature_enabled(X86_FEATURE_CPPC))
		policy->fast_switch_possible = true;
 
	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
@@ -1213,7 +1216,7 @@ static int amd_pstate_change_mode_without_dvr_change(int mode)
 
	cppc_state = mode;
 
-	if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
+	if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
		return 0;
 
	for_each_present_cpu(cpu) {
@@ -1386,7 +1389,7 @@ static bool amd_pstate_acpi_pm_profile_undefined(void)
 
 static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 {
-	int min_freq, max_freq, nominal_freq, ret;
+	int min_freq, max_freq, ret;
	struct amd_cpudata *cpudata;
	struct device *dev;
	u64 value;
@@ -1419,15 +1422,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 
	min_freq = READ_ONCE(cpudata->min_freq);
	max_freq = READ_ONCE(cpudata->max_freq);
-	nominal_freq = READ_ONCE(cpudata->nominal_freq);
-	if (min_freq <= 0 || max_freq <= 0 ||
-	    nominal_freq <= 0 || min_freq > max_freq) {
-		dev_err(dev,
-			"min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect, check _CPC in ACPI tables\n",
-			min_freq, max_freq, nominal_freq);
-		ret = -EINVAL;
-		goto free_cpudata1;
-	}
 
	policy->cpuinfo.min_freq = min_freq;
	policy->cpuinfo.max_freq = max_freq;
@@ -1436,7 +1430,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 
	policy->driver_data = cpudata;
 
-	cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
+	cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata, 0);
 
	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;
@@ -1451,7 +1445,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;
 
-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
		if (ret)
			return ret;
@@ -1541,7 +1535,7 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
		epp = 0;
 
	/* Set initial EPP value */
-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		value &= ~GENMASK_ULL(31, 24);
		value |= (u64)epp << 24;
	}
@@ -1564,6 +1558,12 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
 
	amd_pstate_epp_update_limit(policy);
 
+	/*
+	 * policy->cur is never updated with the amd_pstate_epp driver, but it
+	 * is used as a stale frequency value. So, keep it within limits.
+	 */
+	policy->cur = policy->min;
+
	return 0;
 }
 
@@ -1580,7 +1580,7 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
	value = READ_ONCE(cpudata->cppc_req_cached);
	max_perf = READ_ONCE(cpudata->highest_perf);
 
-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
	} else {
		perf_ctrls.max_perf = max_perf;
@@ -1614,7 +1614,7 @@ static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
	value = READ_ONCE(cpudata->cppc_req_cached);
 
	mutex_lock(&amd_pstate_limits_lock);
-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
 
		/* Set max perf same as min perf */
@@ -1741,6 +1741,46 @@ static int __init amd_pstate_set_driver(int mode_idx)
	return -EINVAL;
 }
 
+/**
+ * CPPC function is not supported for family ID 17H with model_ID ranging from 0x10 to 0x2F.
+ * show the debug message that helps to check if the CPU has CPPC support for loading issue.
+ */
+static bool amd_cppc_supported(void)
+{
+	struct cpuinfo_x86 *c = &cpu_data(0);
+	bool warn = false;
+
+	if ((boot_cpu_data.x86 == 0x17) && (boot_cpu_data.x86_model < 0x30)) {
+		pr_debug_once("CPPC feature is not supported by the processor\n");
+		return false;
+	}
+
+	/*
+	 * If the CPPC feature is disabled in the BIOS for processors that support MSR-based CPPC,
+	 * the AMD Pstate driver may not function correctly.
+	 * Check the CPPC flag and display a warning message if the platform supports CPPC.
+	 * Note: the checking code below will not abort the driver registration process
+	 * because it is added for debugging purposes.
+	 */
+	if (!cpu_feature_enabled(X86_FEATURE_CPPC)) {
+		if (cpu_feature_enabled(X86_FEATURE_ZEN1) || cpu_feature_enabled(X86_FEATURE_ZEN2)) {
+			if (c->x86_model > 0x60 && c->x86_model < 0xaf)
+				warn = true;
+		} else if (cpu_feature_enabled(X86_FEATURE_ZEN3) || cpu_feature_enabled(X86_FEATURE_ZEN4)) {
+			if ((c->x86_model > 0x10 && c->x86_model < 0x1F) ||
+			    (c->x86_model > 0x40 && c->x86_model < 0xaf))
+				warn = true;
+		} else if (cpu_feature_enabled(X86_FEATURE_ZEN5)) {
+			warn = true;
+		}
+	}
+
+	if (warn)
+		pr_warn_once("The CPPC feature is supported but currently disabled by the BIOS.\n"
+					"Please enable it if your BIOS has the CPPC option.\n");
+	return true;
+}
+
 static int __init amd_pstate_init(void)
 {
	struct device *dev_root;
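On a running system, X86_FEATURE_CPPC being enabled shows up as the "cppc" flag in /proc/cpuinfo (the flag name is my reading of cpufeatures.h; this commit does not touch it). An illustrative userspace check:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/cpuinfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* match a "flags" line containing the standalone "cppc" token */
		if (!strncmp(line, "flags", 5) && strstr(line, " cppc")) {
			puts("MSR-based CPPC advertised");
			break;
		}
	}
	fclose(f);
	return 0;
}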
@@ -1749,6 +1789,11 @@ static int __init amd_pstate_init(void)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;
 
+	/* show debug message only if CPPC is not supported */
+	if (!amd_cppc_supported())
+		return -EOPNOTSUPP;
+
	/* show warning message when BIOS broken or ACPI disabled */
	if (!acpi_cpc_valid()) {
		pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
		return -ENODEV;
@@ -1763,35 +1808,43 @@ static int __init amd_pstate_init(void)
	/* check if this machine need CPPC quirks */
	dmi_check_system(amd_pstate_quirks_table);
 
-	switch (cppc_state) {
-	case AMD_PSTATE_UNDEFINED:
+	/*
+	 * determine the driver mode from the command line or kernel config.
+	 * If no command line input is provided, cppc_state will be AMD_PSTATE_UNDEFINED.
+	 * command line options will override the kernel config settings.
+	 */
+
+	if (cppc_state == AMD_PSTATE_UNDEFINED) {
		/* Disable on the following configs by default:
		 * 1. Undefined platforms
		 * 2. Server platforms
		 * 3. Shared memory designs
		 */
		if (amd_pstate_acpi_pm_profile_undefined() ||
-		    amd_pstate_acpi_pm_profile_server() ||
-		    !boot_cpu_has(X86_FEATURE_CPPC)) {
+		    amd_pstate_acpi_pm_profile_server()) {
			pr_info("driver load is disabled, boot with specific mode to enable this\n");
			return -ENODEV;
		}
-		ret = amd_pstate_set_driver(CONFIG_X86_AMD_PSTATE_DEFAULT_MODE);
-		if (ret)
-			return ret;
-		break;
+		/* get driver mode from kernel config option [1:4] */
+		cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE;
+	}
+
+	switch (cppc_state) {
	case AMD_PSTATE_DISABLE:
+		pr_info("driver load is disabled, boot with specific mode to enable this\n");
+		return -ENODEV;
	case AMD_PSTATE_PASSIVE:
	case AMD_PSTATE_ACTIVE:
	case AMD_PSTATE_GUIDED:
		ret = amd_pstate_set_driver(cppc_state);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}
 
	/* capability check */
-	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		pr_debug("AMD CPPC MSR based functionality is supported\n");
		if (cppc_state != AMD_PSTATE_ACTIVE)
			current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
@@ -1805,13 +1858,15 @@ static int __init amd_pstate_init(void)
	/* enable amd pstate feature */
	ret = amd_pstate_enable(true);
	if (ret) {
-		pr_err("failed to enable with return %d\n", ret);
+		pr_err("failed to enable driver mode(%d)\n", cppc_state);
		return ret;
	}
 
	ret = cpufreq_register_driver(current_pstate_driver);
-	if (ret)
+	if (ret) {
		pr_err("failed to register with return %d\n", ret);
+		goto disable_driver;
+	}
 
	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
@@ -1827,6 +1882,8 @@ static int __init amd_pstate_init(void)
 
 global_attr_free:
	cpufreq_unregister_driver(current_pstate_driver);
+disable_driver:
+	amd_pstate_enable(false);
	return ret;
 }
 device_initcall(amd_pstate_init);
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -99,6 +99,7 @@ struct amd_cpudata {
	u32	policy;
	u64	cppc_cap1_cached;
	bool	suspended;
+	s16	epp_default;
 };
 
 #endif /* _LINUX_AMD_PSTATE_H */
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -608,16 +608,15 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
 static ssize_t show_boost(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+	return sysfs_emit(buf, "%d\n", cpufreq_driver->boost_enabled);
 }
 
 static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
 {
-	int ret, enable;
+	bool enable;
 
-	ret = sscanf(buf, "%d", &enable);
-	if (ret != 1 || enable < 0 || enable > 1)
+	if (kstrtobool(buf, &enable))
		return -EINVAL;
 
	if (cpufreq_boost_trigger_state(enable)) {
@@ -641,10 +640,10 @@ static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
 static ssize_t store_local_boost(struct cpufreq_policy *policy,
				 const char *buf, size_t count)
 {
-	int ret, enable;
+	int ret;
+	bool enable;
 
-	ret = kstrtoint(buf, 10, &enable);
-	if (ret || enable < 0 || enable > 1)
+	if (kstrtobool(buf, &enable))
		return -EINVAL;
 
	if (!cpufreq_driver->boost_enabled)
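Both boost hunks replace hand-rolled integer parsing with kstrtobool(), which also accepts y/Y/n/N style input. A simplified userspace sketch of its accept/reject behavior (the real helper additionally understands "on"/"off"; this is not the kernel implementation):

#include <stdio.h>

static int sketch_kstrtobool(const char *s, int *res)
{
	switch (s[0]) {
	case 'y': case 'Y': case '1':
		*res = 1;
		return 0;
	case 'n': case 'N': case '0':
		*res = 0;
		return 0;
	default:
		return -1;	/* the kernel helper returns -EINVAL */
	}
}

int main(void)
{
	int v;

	printf("\"yes\" -> %d\n", sketch_kstrtobool("yes", &v) ? -1 : v);	/* 1 */
	printf("\"0\"   -> %d\n", sketch_kstrtobool("0", &v) ? -1 : v);	/* 0 */
	printf("\"2\"   -> %d\n", sketch_kstrtobool("2", &v) ? -1 : v);	/* -1 */
	return 0;
}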
@@ -739,7 +738,7 @@ static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
 static ssize_t show_##file_name				\
 (struct cpufreq_policy *policy, char *buf)		\
 {							\
-	return sprintf(buf, "%u\n", policy->object);	\
+	return sysfs_emit(buf, "%u\n", policy->object);	\
 }
 
 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
@@ -760,11 +759,11 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
 
	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
-		ret = sprintf(buf, "%u\n", freq);
+		ret = sysfs_emit(buf, "%u\n", freq);
	else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
-		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
+		ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
-		ret = sprintf(buf, "%u\n", policy->cur);
+		ret = sysfs_emit(buf, "%u\n", policy->cur);
	return ret;
 }
@@ -798,9 +797,9 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
	unsigned int cur_freq = __cpufreq_get(policy);
 
	if (cur_freq)
-		return sprintf(buf, "%u\n", cur_freq);
+		return sysfs_emit(buf, "%u\n", cur_freq);
 
-	return sprintf(buf, "<unknown>\n");
+	return sysfs_emit(buf, "<unknown>\n");
 }
 
 /*
@@ -809,12 +808,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
 {
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
-		return sprintf(buf, "powersave\n");
+		return sysfs_emit(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
-		return sprintf(buf, "performance\n");
+		return sysfs_emit(buf, "performance\n");
	else if (policy->governor)
-		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
-				policy->governor->name);
+		return sysfs_emit(buf, "%s\n", policy->governor->name);
	return -EINVAL;
 }
@@ -873,7 +871,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
	struct cpufreq_governor *t;
 
	if (!has_target()) {
-		i += sprintf(buf, "performance powersave");
+		i += sysfs_emit(buf, "performance powersave");
		goto out;
	}
@@ -882,11 +880,11 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			break;
-		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
+		i += sysfs_emit_at(buf, i, "%s ", t->name);
	}
	mutex_unlock(&cpufreq_governor_mutex);
 out:
-	i += sprintf(&buf[i], "\n");
+	i += sysfs_emit_at(buf, i, "\n");
	return i;
 }
@@ -896,7 +894,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
	unsigned int cpu;
 
	for_each_cpu(cpu, mask) {
-		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u ", cpu);
+		i += sysfs_emit_at(buf, i, "%u ", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
@@ -904,7 +902,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
	/* Remove the extra space at the end */
	i--;
 
-	i += sprintf(&buf[i], "\n");
+	i += sysfs_emit_at(buf, i, "\n");
	return i;
 }
 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
@@ -947,7 +945,7 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
 {
	if (!policy->governor || !policy->governor->show_setspeed)
-		return sprintf(buf, "<unsupported>\n");
+		return sysfs_emit(buf, "<unsupported>\n");
 
	return policy->governor->show_setspeed(policy, buf);
 }
@@ -961,8 +959,8 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
	int ret;
	ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
	if (!ret)
-		return sprintf(buf, "%u\n", limit);
-	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
+		return sysfs_emit(buf, "%u\n", limit);
+	return sysfs_emit(buf, "%u\n", policy->cpuinfo.max_freq);
 }
 
 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
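The sprintf()-to-sysfs_emit() conversions in the hunks above all lean on the same contract: a sysfs show() buffer is exactly one page, and sysfs_emit() refuses to write past it. A rough userspace approximation of that contract (the real kernel helper also WARNs on a buffer that is not page-aligned and is built on vscnprintf()):

#include <stdarg.h>
#include <stdio.h>

#define PAGE_SIZE 4096	/* typical x86 page size, assumed here */

static int sketch_sysfs_emit(char *buf, const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(buf, PAGE_SIZE, fmt, args);	/* bounded to one page */
	va_end(args);
	return len < PAGE_SIZE ? len : PAGE_SIZE - 1;	/* truncated length */
}

int main(void)
{
	char page[PAGE_SIZE];

	printf("%d\n", sketch_sysfs_emit(page, "%u\n", 2400000));
	return 0;
}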
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -300,6 +300,7 @@ static struct cpufreq_driver *intel_pstate_driver __read_mostly;
 
 #define HYBRID_SCALING_FACTOR		78741
 #define HYBRID_SCALING_FACTOR_MTL	80000
+#define HYBRID_SCALING_FACTOR_LNL	86957
 
 static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR;
@@ -1631,7 +1632,7 @@ void notify_hwp_interrupt(void)
	unsigned long flags;
	u64 value;
 
-	if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+	if (!hwp_active || !cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
		return;
 
	rdmsrl_safe(MSR_HWP_STATUS, &value);
@@ -1659,7 +1660,7 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
 {
	bool cancel_work;
 
-	if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+	if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
		return;
 
	/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
@@ -2367,54 +2368,54 @@ static const struct pstate_funcs knl_funcs = {
	.get_val = core_get_val,
 };
 
-#define X86_MATCH(model, policy)					 \
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
-					   X86_FEATURE_APERFMPERF, &policy)
+#define X86_MATCH(vfm, policy)					 \
+	X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_APERFMPERF, &policy)
 
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
-	X86_MATCH(SANDYBRIDGE,		core_funcs),
-	X86_MATCH(SANDYBRIDGE_X,	core_funcs),
-	X86_MATCH(ATOM_SILVERMONT,	silvermont_funcs),
-	X86_MATCH(IVYBRIDGE,		core_funcs),
-	X86_MATCH(HASWELL,		core_funcs),
-	X86_MATCH(BROADWELL,		core_funcs),
-	X86_MATCH(IVYBRIDGE_X,		core_funcs),
-	X86_MATCH(HASWELL_X,		core_funcs),
-	X86_MATCH(HASWELL_L,		core_funcs),
-	X86_MATCH(HASWELL_G,		core_funcs),
-	X86_MATCH(BROADWELL_G,		core_funcs),
-	X86_MATCH(ATOM_AIRMONT,		airmont_funcs),
-	X86_MATCH(SKYLAKE_L,		core_funcs),
-	X86_MATCH(BROADWELL_X,		core_funcs),
-	X86_MATCH(SKYLAKE,		core_funcs),
-	X86_MATCH(BROADWELL_D,		core_funcs),
-	X86_MATCH(XEON_PHI_KNL,		knl_funcs),
-	X86_MATCH(XEON_PHI_KNM,		knl_funcs),
-	X86_MATCH(ATOM_GOLDMONT,	core_funcs),
-	X86_MATCH(ATOM_GOLDMONT_PLUS,	core_funcs),
-	X86_MATCH(SKYLAKE_X,		core_funcs),
-	X86_MATCH(COMETLAKE,		core_funcs),
-	X86_MATCH(ICELAKE_X,		core_funcs),
-	X86_MATCH(TIGERLAKE,		core_funcs),
-	X86_MATCH(SAPPHIRERAPIDS_X,	core_funcs),
-	X86_MATCH(EMERALDRAPIDS_X,	core_funcs),
+	X86_MATCH(INTEL_SANDYBRIDGE,		core_funcs),
+	X86_MATCH(INTEL_SANDYBRIDGE_X,		core_funcs),
+	X86_MATCH(INTEL_ATOM_SILVERMONT,	silvermont_funcs),
+	X86_MATCH(INTEL_IVYBRIDGE,		core_funcs),
+	X86_MATCH(INTEL_HASWELL,		core_funcs),
+	X86_MATCH(INTEL_BROADWELL,		core_funcs),
+	X86_MATCH(INTEL_IVYBRIDGE_X,		core_funcs),
+	X86_MATCH(INTEL_HASWELL_X,		core_funcs),
+	X86_MATCH(INTEL_HASWELL_L,		core_funcs),
+	X86_MATCH(INTEL_HASWELL_G,		core_funcs),
+	X86_MATCH(INTEL_BROADWELL_G,		core_funcs),
+	X86_MATCH(INTEL_ATOM_AIRMONT,		airmont_funcs),
+	X86_MATCH(INTEL_SKYLAKE_L,		core_funcs),
+	X86_MATCH(INTEL_BROADWELL_X,		core_funcs),
+	X86_MATCH(INTEL_SKYLAKE,		core_funcs),
+	X86_MATCH(INTEL_BROADWELL_D,		core_funcs),
+	X86_MATCH(INTEL_XEON_PHI_KNL,		knl_funcs),
+	X86_MATCH(INTEL_XEON_PHI_KNM,		knl_funcs),
+	X86_MATCH(INTEL_ATOM_GOLDMONT,		core_funcs),
+	X86_MATCH(INTEL_ATOM_GOLDMONT_PLUS,	core_funcs),
+	X86_MATCH(INTEL_SKYLAKE_X,		core_funcs),
+	X86_MATCH(INTEL_COMETLAKE,		core_funcs),
+	X86_MATCH(INTEL_ICELAKE_X,		core_funcs),
+	X86_MATCH(INTEL_TIGERLAKE,		core_funcs),
+	X86_MATCH(INTEL_SAPPHIRERAPIDS_X,	core_funcs),
+	X86_MATCH(INTEL_EMERALDRAPIDS_X,	core_funcs),
	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
 
 #ifdef CONFIG_ACPI
 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
-	X86_MATCH(BROADWELL_D,		core_funcs),
-	X86_MATCH(BROADWELL_X,		core_funcs),
-	X86_MATCH(SKYLAKE_X,		core_funcs),
-	X86_MATCH(ICELAKE_X,		core_funcs),
-	X86_MATCH(SAPPHIRERAPIDS_X,	core_funcs),
+	X86_MATCH(INTEL_BROADWELL_D,		core_funcs),
+	X86_MATCH(INTEL_BROADWELL_X,		core_funcs),
+	X86_MATCH(INTEL_SKYLAKE_X,		core_funcs),
+	X86_MATCH(INTEL_ICELAKE_X,		core_funcs),
+	X86_MATCH(INTEL_SAPPHIRERAPIDS_X,	core_funcs),
+	X86_MATCH(INTEL_EMERALDRAPIDS_X,	core_funcs),
	{}
 };
 #endif
 
 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
-	X86_MATCH(KABYLAKE,		core_funcs),
+	X86_MATCH(INTEL_KABYLAKE,	core_funcs),
	{}
 };
@@ -3350,14 +3351,13 @@ static inline void intel_pstate_request_control_from_smm(void) {}
 
 #define INTEL_PSTATE_HWP_BROADWELL	0x01
 
-#define X86_MATCH_HWP(model, hwp_mode)					\
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
-					   X86_FEATURE_HWP, hwp_mode)
+#define X86_MATCH_HWP(vfm, hwp_mode)				\
+	X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_HWP, hwp_mode)
 
 static const struct x86_cpu_id hwp_support_ids[] __initconst = {
-	X86_MATCH_HWP(BROADWELL_X,	INTEL_PSTATE_HWP_BROADWELL),
-	X86_MATCH_HWP(BROADWELL_D,	INTEL_PSTATE_HWP_BROADWELL),
-	X86_MATCH_HWP(ANY,		0),
+	X86_MATCH_HWP(INTEL_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
+	X86_MATCH_HWP(INTEL_BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
+	X86_MATCH_HWP(INTEL_ANY,	0),
	{}
 };
@@ -3390,15 +3390,19 @@ static const struct x86_cpu_id intel_epp_default[] = {
	 * which can result in one core turbo frequency for
	 * AlderLake Mobile CPUs.
	 */
-	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
-	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
-	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
-		HWP_EPP_BALANCE_POWERSAVE, 115, 16)),
+	X86_MATCH_VFM(INTEL_ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
+	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+	X86_MATCH_VFM(INTEL_METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
+		179, 64, 16)),
+	X86_MATCH_VFM(INTEL_ARROWLAKE, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
+		179, 64, 16)),
	{}
 };
 
 static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
-	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
+	X86_MATCH_VFM(INTEL_METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
+	X86_MATCH_VFM(INTEL_ARROWLAKE, HYBRID_SCALING_FACTOR_MTL),
+	X86_MATCH_VFM(INTEL_LUNARLAKE_M, HYBRID_SCALING_FACTOR_LNL),
	{}
 };
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -520,10 +520,10 @@ static struct cpufreq_driver centrino_driver = {
  * or ASCII model IDs.
  */
 static const struct x86_cpu_id centrino_ids[] = {
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL,  6,  9, X86_FEATURE_EST, NULL),
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL,  6, 13, X86_FEATURE_EST, NULL),
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15,  3, X86_FEATURE_EST, NULL),
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15,  4, X86_FEATURE_EST, NULL),
+	X86_MATCH_VFM_FEATURE(IFM( 6,  9), X86_FEATURE_EST, NULL),
+	X86_MATCH_VFM_FEATURE(IFM( 6, 13), X86_FEATURE_EST, NULL),
+	X86_MATCH_VFM_FEATURE(IFM(15,  3), X86_FEATURE_EST, NULL),
+	X86_MATCH_VFM_FEATURE(IFM(15,  4), X86_FEATURE_EST, NULL),
	{}
 };
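The IFM()/INTEL_* identifiers used throughout these match-table conversions pack vendor, family, and model into one integer, so a table entry no longer hard-codes family 6. A sketch of that packing; the bit positions are my reading of the x86 VFM headers and should be verified against <asm/cpu_device_id.h>:

#include <stdio.h>

#define VFM_MODEL_BIT	0	/* assumed layout */
#define VFM_FAMILY_BIT	8
#define VFM_VENDOR_BIT	16
#define VFM_MAKE(vendor, family, model) \
	(((vendor) << VFM_VENDOR_BIT) | ((family) << VFM_FAMILY_BIT) | \
	 ((model) << VFM_MODEL_BIT))
#define X86_VENDOR_INTEL 0
#define IFM(family, model) VFM_MAKE(X86_VENDOR_INTEL, (family), (model))

int main(void)
{
	/* Pentium M (family 6, model 13), as in the centrino table above */
	printf("IFM(6, 13) = 0x%06x\n", IFM(6, 13));
	return 0;
}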