Merge branches 'acpi-processor' and 'acpi-cppc'

Merge ACPI processor driver updates and ACPI CPPC library updates for
7.1-rc1:

 - Address assorted issues and clean up the code in the ACPI processor
   idle driver (Huisong Li)

 - Replace strlcat() in the ACPI processor idle driver with a better
   alternative (Andy Shevchenko)

 - Rearrange and clean up acpi_processor_errata_piix4() (Rafael Wysocki)

 - Move reference performance to capabilities and fix an uninitialized
   variable in the ACPI CPPC library (Pengjie Zhang)

 - Add support for the Performance Limited Register to the ACPI CPPC
   library (Sumit Gupta)

 - Add a cppc_get_perf() API for reading performance controls, extend
   cppc_set_epp_perf() to cover FFH and SystemMemory registers, and make
   the ACPI CPPC library warn when the mandatory DESIRED_PERF register is
   missing (Sumit Gupta); a usage sketch follows this list

 - Modify the cpufreq CPPC driver to update MIN_PERF/MAX_PERF in its
   target callbacks, so that performance bounds can be controlled through
   the standard scaling_min_freq and scaling_max_freq sysfs attributes,
   and add sysfs documentation for the Performance Limited Register to it
   (Sumit Gupta)
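
For illustration, a minimal sketch (not taken from the series; the wrapper
function and its message are hypothetical) of how a consumer might use the
new cppc_get_perf() API to seed cached controls at CPU init, which is
essentially what the cppc-cpufreq driver does in the diff below:

#include <acpi/cppc_acpi.h>

/*
 * Illustrative only: read back the platform's current performance
 * controls once, so that later partial updates (e.g. writing only
 * desired_perf) start from valid cached values for the other fields.
 */
static int example_seed_perf_ctrls(int cpu, struct cppc_perf_ctrls *ctrls)
{
        int ret;

        /* Fills desired_perf, min_perf, max_perf, energy_perf, auto_sel. */
        ret = cppc_get_perf(cpu, ctrls);
        if (ret)
                pr_debug("CPU%d: reading perf controls failed: %d\n", cpu, ret);

        return ret;
}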

* acpi-processor:
  ACPI: processor: idle: Reset cpuidle on C-state list changes
  cpuidle: Extract and export no-lock variants of cpuidle_unregister_device()
  ACPI: processor: idle: Fix NULL pointer dereference in hotplug path
  ACPI: processor: idle: Reset power_setup_done flag on initialization failure
  ACPI: processor: Rearrange and clean up acpi_processor_errata_piix4()
  ACPI: processor: idle: Replace strlcat() with better alternative
  ACPI: processor: idle: Remove redundant static variable and rename cstate check function
  ACPI: processor: idle: Move max_cstate update out of the loop
  ACPI: processor: idle: Remove redundant cstate check in acpi_processor_power_init
  ACPI: processor: idle: Add missing bounds check in flatten_lpi_states()

* acpi-cppc:
  ACPI: CPPC: Check cpc_read() return values consistently
  ACPI: CPPC: Fix uninitialized ref variable in cppc_get_perf_caps()
  ACPI: CPPC: Move reference performance to capabilities
  cpufreq: CPPC: Add sysfs documentation for perf_limited
  ACPI: CPPC: add APIs and sysfs interface for perf_limited
  cpufreq: cppc: Update MIN_PERF/MAX_PERF in target callbacks
  cpufreq: CPPC: Update cached perf_ctrls on sysfs write
  ACPI: CPPC: Extend cppc_set_epp_perf() for FFH/SystemMemory
  ACPI: CPPC: Warn on missing mandatory DESIRED_PERF register
  ACPI: CPPC: Add cppc_get_perf() API to read performance controls
Rafael J. Wysocki
2026-04-09 21:26:06 +02:00
8 changed files with 459 additions and 125 deletions


@@ -327,6 +327,24 @@ Description: Energy performance preference
This file is only present if the cppc-cpufreq driver is in use.
What: /sys/devices/system/cpu/cpuX/cpufreq/perf_limited
Date: February 2026
Contact: linux-pm@vger.kernel.org
Description: Performance Limited
Read to check if platform throttling (thermal/power/current
limits) caused delivered performance to fall below the
requested level. A non-zero value indicates throttling occurred.
Write the bitmask of bits to clear:
- 0x1 = clear bit 0 (desired performance excursion)
- 0x2 = clear bit 1 (minimum performance excursion)
- 0x3 = clear both bits
The platform sets these bits; OSPM can only clear them.
This file is only present if the cppc-cpufreq driver is in use.
What: /sys/devices/system/cpu/cpu*/cache/index3/cache_disable_{0,1}
Date: August 2008
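
For illustration, a small user-space sketch (not part of this series) that
exercises the perf_limited protocol documented above; cpu0 and the decimal
write value are illustrative choices (3 == 0x3, i.e. both bits):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/devices/system/cpu/cpu0/cpufreq/perf_limited";
        char buf[32];
        ssize_t n;
        int fd = open(path, O_RDWR);

        if (fd < 0)
                return 1;

        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
                buf[n] = '\0';
                printf("perf_limited: %s", buf);  /* non-zero => throttled */
        }

        /* Clear both sticky excursion bits (bit 0 and bit 1). */
        if (pwrite(fd, "3", 1, 0) < 0)
                perror("clear perf_limited");

        close(fd);
        return 0;
}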


@@ -48,11 +48,6 @@ acpi_handle acpi_get_processor_handle(int cpu)
static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
u8 value1 = 0;
u8 value2 = 0;
struct pci_dev *ide_dev = NULL, *isa_dev = NULL;
if (!dev)
return -EINVAL;
@@ -108,16 +103,16 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
* each IDE controller's DMA status to make sure we catch all
* DMA activity.
*/
ide_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB,
PCI_ANY_ID, PCI_ANY_ID, NULL);
if (ide_dev) {
errata.piix4.bmisx = pci_resource_start(ide_dev, 4);
if (dev) {
errata.piix4.bmisx = pci_resource_start(dev, 4);
if (errata.piix4.bmisx)
dev_dbg(&ide_dev->dev,
dev_dbg(&dev->dev,
"Bus master activity detection (BM-IDE) erratum enabled\n");
pci_dev_put(ide_dev);
pci_dev_put(dev);
}
/*
@@ -129,18 +124,20 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
* disable C3 support if this is enabled, as some legacy
* devices won't operate well if fast DMA is disabled.
*/
isa_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB_0,
PCI_ANY_ID, PCI_ANY_ID, NULL);
if (isa_dev) {
pci_read_config_byte(isa_dev, 0x76, &value1);
pci_read_config_byte(isa_dev, 0x77, &value2);
if (dev) {
u8 value1 = 0, value2 = 0;
pci_read_config_byte(dev, 0x76, &value1);
pci_read_config_byte(dev, 0x77, &value2);
if ((value1 & 0x80) || (value2 & 0x80)) {
errata.piix4.fdma = 1;
dev_dbg(&isa_dev->dev,
dev_dbg(&dev->dev,
"Type-F DMA livelock erratum (C3 disabled)\n");
}
pci_dev_put(isa_dev);
pci_dev_put(dev);
}
break;


@@ -177,12 +177,12 @@ __ATTR(_name, 0444, show_##_name, NULL)
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, reference_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, guaranteed_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
/* Check for a valid access_width; otherwise, fall back to using bit_width */
@@ -853,6 +853,16 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
}
per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
/*
* In CPPC v1, DESIRED_PERF is mandatory. In CPPC v2, it is optional
* only when AUTO_SEL_ENABLE is supported.
*/
if (!CPC_SUPPORTED(&cpc_ptr->cpc_regs[DESIRED_PERF]) &&
(!osc_sb_cppc2_support_acked ||
!CPC_SUPPORTED(&cpc_ptr->cpc_regs[AUTO_SEL_ENABLE])))
pr_warn("Desired perf. register is mandatory if CPPC v2 is not supported "
"or autonomous selection is disabled\n");
/*
* Initialize the remaining cpc_regs as unsupported.
* Example: In case FW exposes CPPC v2, the below loop will initialize
@@ -1342,9 +1352,10 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
struct cpc_register_resource *highest_reg, *lowest_reg,
*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
*low_freq_reg = NULL, *nom_freq_reg = NULL;
u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
*lowest_non_linear_reg, *nominal_reg, *reference_reg,
*guaranteed_reg, *low_freq_reg = NULL, *nom_freq_reg = NULL;
u64 high, low, guaranteed, nom, ref, min_nonlinear,
low_f = 0, nom_f = 0;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
struct cppc_pcc_data *pcc_ss_data = NULL;
int ret = 0, regs_in_pcc = 0;
@@ -1358,6 +1369,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
reference_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
@@ -1365,6 +1377,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
/* Are any of the regs PCC ?*/
if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
(CPC_SUPPORTED(reference_reg) && CPC_IN_PCC(reference_reg)) ||
CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg) ||
CPC_IN_PCC(guaranteed_reg)) {
if (pcc_ss_id < 0) {
@@ -1381,35 +1394,66 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
}
}
cpc_read(cpunum, highest_reg, &high);
ret = cpc_read(cpunum, highest_reg, &high);
if (ret)
goto out_err;
perf_caps->highest_perf = high;
cpc_read(cpunum, lowest_reg, &low);
ret = cpc_read(cpunum, lowest_reg, &low);
if (ret)
goto out_err;
perf_caps->lowest_perf = low;
cpc_read(cpunum, nominal_reg, &nom);
ret = cpc_read(cpunum, nominal_reg, &nom);
if (ret)
goto out_err;
perf_caps->nominal_perf = nom;
/*
* If reference perf register is not supported then we should
* use the nominal perf value
*/
if (CPC_SUPPORTED(reference_reg)) {
ret = cpc_read(cpunum, reference_reg, &ref);
if (ret)
goto out_err;
} else {
ref = nom;
}
perf_caps->reference_perf = ref;
if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
perf_caps->guaranteed_perf = 0;
} else {
cpc_read(cpunum, guaranteed_reg, &guaranteed);
ret = cpc_read(cpunum, guaranteed_reg, &guaranteed);
if (ret)
goto out_err;
perf_caps->guaranteed_perf = guaranteed;
}
cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
ret = cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
if (ret)
goto out_err;
perf_caps->lowest_nonlinear_perf = min_nonlinear;
if (!high || !low || !nom || !min_nonlinear)
if (!high || !low || !nom || !ref || !min_nonlinear) {
ret = -EFAULT;
goto out_err;
}
/* Read optional lowest and nominal frequencies if present */
if (CPC_SUPPORTED(low_freq_reg))
cpc_read(cpunum, low_freq_reg, &low_f);
if (CPC_SUPPORTED(low_freq_reg)) {
ret = cpc_read(cpunum, low_freq_reg, &low_f);
if (ret)
goto out_err;
}
if (CPC_SUPPORTED(nom_freq_reg))
cpc_read(cpunum, nom_freq_reg, &nom_f);
if (CPC_SUPPORTED(nom_freq_reg)) {
ret = cpc_read(cpunum, nom_freq_reg, &nom_f);
if (ret)
goto out_err;
}
perf_caps->lowest_freq = low_f;
perf_caps->nominal_freq = nom_f;
@@ -1431,20 +1475,10 @@ EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
bool cppc_perf_ctrs_in_pcc_cpu(unsigned int cpu)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cpc_register_resource *ref_perf_reg;
/*
* If reference perf register is not supported then we should use the
* nominal perf value
*/
ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
if (!CPC_SUPPORTED(ref_perf_reg))
ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
return CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]) ||
CPC_IN_PCC(ref_perf_reg);
CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]);
}
EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc_cpu);
@@ -1481,10 +1515,10 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
struct cpc_register_resource *delivered_reg, *reference_reg,
*ref_perf_reg, *ctr_wrap_reg;
*ctr_wrap_reg;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
struct cppc_pcc_data *pcc_ss_data = NULL;
u64 delivered, reference, ref_perf, ctr_wrap_time;
u64 delivered, reference, ctr_wrap_time;
int ret = 0, regs_in_pcc = 0;
if (!cpc_desc) {
@@ -1494,19 +1528,11 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
/*
* If reference perf register is not supported then we should
* use the nominal perf value
*/
if (!CPC_SUPPORTED(ref_perf_reg))
ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
/* Are any of the regs PCC ?*/
if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
CPC_IN_PCC(ctr_wrap_reg)) {
if (pcc_ss_id < 0) {
pr_debug("Invalid pcc_ss_id\n");
return -ENODEV;
@@ -1521,9 +1547,13 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
}
}
cpc_read(cpunum, delivered_reg, &delivered);
cpc_read(cpunum, reference_reg, &reference);
cpc_read(cpunum, ref_perf_reg, &ref_perf);
ret = cpc_read(cpunum, delivered_reg, &delivered);
if (ret)
goto out_err;
ret = cpc_read(cpunum, reference_reg, &reference);
if (ret)
goto out_err;
/*
* Per spec, if ctr_wrap_time optional register is unsupported, then the
@@ -1531,17 +1561,19 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
* platform
*/
ctr_wrap_time = (u64)(~((u64)0));
if (CPC_SUPPORTED(ctr_wrap_reg))
cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
if (CPC_SUPPORTED(ctr_wrap_reg)) {
ret = cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
if (ret)
goto out_err;
}
if (!delivered || !reference || !ref_perf) {
if (!delivered || !reference) {
ret = -EFAULT;
goto out_err;
}
perf_fb_ctrs->delivered = delivered;
perf_fb_ctrs->reference = reference;
perf_fb_ctrs->reference_perf = ref_perf;
perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
if (regs_in_pcc)
@@ -1561,6 +1593,8 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
struct cpc_register_resource *auto_sel_reg;
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cppc_pcc_data *pcc_ss_data = NULL;
bool autosel_ffh_sysmem;
bool epp_ffh_sysmem;
int ret;
if (!cpc_desc) {
@@ -1571,6 +1605,11 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF];
epp_ffh_sysmem = CPC_SUPPORTED(epp_set_reg) &&
(CPC_IN_FFH(epp_set_reg) || CPC_IN_SYSTEM_MEMORY(epp_set_reg));
autosel_ffh_sysmem = CPC_SUPPORTED(auto_sel_reg) &&
(CPC_IN_FFH(auto_sel_reg) || CPC_IN_SYSTEM_MEMORY(auto_sel_reg));
if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) {
if (pcc_ss_id < 0) {
pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
@@ -1596,11 +1635,22 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
up_write(&pcc_ss_data->pcc_lock);
} else if (osc_cpc_flexible_adr_space_confirmed &&
CPC_SUPPORTED(epp_set_reg) && CPC_IN_FFH(epp_set_reg)) {
ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
(epp_ffh_sysmem || autosel_ffh_sysmem)) {
if (autosel_ffh_sysmem) {
ret = cpc_write(cpu, auto_sel_reg, enable);
if (ret)
return ret;
}
if (epp_ffh_sysmem) {
ret = cpc_write(cpu, epp_set_reg,
perf_ctrls->energy_perf);
if (ret)
return ret;
}
} else {
ret = -ENOTSUPP;
pr_debug("_CPC in PCC and _CPC in FFH are not supported\n");
pr_debug("_CPC in PCC/FFH/SystemMemory are not supported\n");
}
return ret;
@@ -1738,6 +1788,101 @@ int cppc_set_enable(int cpu, bool enable)
}
EXPORT_SYMBOL_GPL(cppc_set_enable);
/**
* cppc_get_perf - Get a CPU's performance controls.
* @cpu: CPU for which to get performance controls.
* @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
*
* Return: 0 for success with perf_ctrls, -ERRNO otherwise.
*/
int cppc_get_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cpc_register_resource *desired_perf_reg,
*min_perf_reg, *max_perf_reg,
*energy_perf_reg, *auto_sel_reg;
u64 desired_perf = 0, min = 0, max = 0, energy_perf = 0, auto_sel = 0;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cppc_pcc_data *pcc_ss_data = NULL;
int ret = 0, regs_in_pcc = 0;
if (!cpc_desc) {
pr_debug("No CPC descriptor for CPU:%d\n", cpu);
return -ENODEV;
}
if (!perf_ctrls) {
pr_debug("Invalid perf_ctrls pointer\n");
return -EINVAL;
}
desired_perf_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF];
max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF];
energy_perf_reg = &cpc_desc->cpc_regs[ENERGY_PERF];
auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
/* Are any of the regs PCC ?*/
if (CPC_IN_PCC(desired_perf_reg) || CPC_IN_PCC(min_perf_reg) ||
CPC_IN_PCC(max_perf_reg) || CPC_IN_PCC(energy_perf_reg) ||
CPC_IN_PCC(auto_sel_reg)) {
if (pcc_ss_id < 0) {
pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
return -ENODEV;
}
pcc_ss_data = pcc_data[pcc_ss_id];
regs_in_pcc = 1;
down_write(&pcc_ss_data->pcc_lock);
/* Ring doorbell once to update PCC subspace */
if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
ret = -EIO;
goto out_err;
}
}
/* Read optional elements if present */
if (CPC_SUPPORTED(max_perf_reg)) {
ret = cpc_read(cpu, max_perf_reg, &max);
if (ret)
goto out_err;
}
perf_ctrls->max_perf = max;
if (CPC_SUPPORTED(min_perf_reg)) {
ret = cpc_read(cpu, min_perf_reg, &min);
if (ret)
goto out_err;
}
perf_ctrls->min_perf = min;
if (CPC_SUPPORTED(desired_perf_reg)) {
ret = cpc_read(cpu, desired_perf_reg, &desired_perf);
if (ret)
goto out_err;
}
perf_ctrls->desired_perf = desired_perf;
if (CPC_SUPPORTED(energy_perf_reg)) {
ret = cpc_read(cpu, energy_perf_reg, &energy_perf);
if (ret)
goto out_err;
}
perf_ctrls->energy_perf = energy_perf;
if (CPC_SUPPORTED(auto_sel_reg)) {
ret = cpc_read(cpu, auto_sel_reg, &auto_sel);
if (ret)
goto out_err;
}
perf_ctrls->auto_sel = (bool)auto_sel;
out_err:
if (regs_in_pcc)
up_write(&pcc_ss_data->pcc_lock);
return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf);
/**
* cppc_set_perf - Set a CPU's performance controls.
* @cpu: CPU for which to set performance controls.
@@ -1870,6 +2015,62 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
/**
* cppc_get_perf_limited - Get the Performance Limited register value.
* @cpu: CPU from which to get Performance Limited register.
* @perf_limited: Pointer to store the Performance Limited value.
*
* The returned value contains sticky status bits indicating platform-imposed
* performance limitations.
*
* Return: 0 for success, -EIO on failure, -EOPNOTSUPP if not supported.
*/
int cppc_get_perf_limited(int cpu, u64 *perf_limited)
{
return cppc_get_reg_val(cpu, PERF_LIMITED, perf_limited);
}
EXPORT_SYMBOL_GPL(cppc_get_perf_limited);
/**
* cppc_set_perf_limited() - Clear bits in the Performance Limited register.
* @cpu: CPU on which to write register.
* @bits_to_clear: Bitmask of bits to clear in the perf_limited register.
*
* The Performance Limited register contains two sticky bits set by the platform:
* - Bit 0 (Desired_Excursion): Set when delivered performance is constrained
* below desired performance. Not used when Autonomous Selection is enabled.
* - Bit 1 (Minimum_Excursion): Set when delivered performance is constrained
* below minimum performance.
*
* These bits are sticky and remain set until OSPM explicitly clears them.
* This function only allows clearing bits (the platform sets them).
*
* Return: 0 for success, -EINVAL for invalid bits, -EIO on register
* access failure, -EOPNOTSUPP if not supported.
*/
int cppc_set_perf_limited(int cpu, u64 bits_to_clear)
{
u64 current_val, new_val;
int ret;
/* Only bits 0 and 1 are valid */
if (bits_to_clear & ~CPPC_PERF_LIMITED_MASK)
return -EINVAL;
if (!bits_to_clear)
return 0;
ret = cppc_get_perf_limited(cpu, &current_val);
if (ret)
return ret;
/* Clear the specified bits */
new_val = current_val & ~bits_to_clear;
return cppc_set_reg_val(cpu, PERF_LIMITED, new_val);
}
EXPORT_SYMBOL_GPL(cppc_set_perf_limited);
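
A hedged sketch of the intended call pattern for the two helpers above; the
consumer function and its message are illustrative:

/*
 * Illustrative consumer: log a throttling excursion, then acknowledge it.
 * Only the platform sets the bits; OSPM clears exactly what it observed.
 */
static void example_ack_perf_limited(int cpu)
{
        u64 limited;

        if (cppc_get_perf_limited(cpu, &limited) || !limited)
                return;

        pr_info("CPU%d: platform limited performance (0x%llx)\n",
                cpu, limited);
        cppc_set_perf_limited(cpu, limited & CPPC_PERF_LIMITED_MASK);
}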
/**
* cppc_get_transition_latency - returns frequency transition latency in ns
* @cpu_num: CPU number for per_cpu().


@@ -819,19 +819,13 @@ static void acpi_processor_setup_cstates(struct acpi_processor *pr)
drv->state_count = count;
}
static inline void acpi_processor_cstate_first_run_checks(void)
static inline void acpi_processor_update_max_cstate(void)
{
static int first_run;
if (first_run)
return;
dmi_check_system(processor_power_dmi_table);
max_cstate = acpi_processor_cstate_check(max_cstate);
if (max_cstate < ACPI_C_STATES_MAX)
pr_notice("processor limited to max C-state %d\n", max_cstate);
first_run++;
if (nocst)
return;
@@ -840,7 +834,7 @@ static inline void acpi_processor_cstate_first_run_checks(void)
#else
static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static inline void acpi_processor_update_max_cstate(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
return -ENODEV;
@@ -1016,9 +1010,7 @@ static bool combine_lpi_states(struct acpi_lpi_state *local,
result->arch_flags = parent->arch_flags;
result->index = parent->index;
strscpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
scnprintf(result->desc, ACPI_CX_DESC_LEN, "%s+%s", local->desc, parent->desc);
return true;
}
@@ -1068,6 +1060,8 @@ static unsigned int flatten_lpi_states(struct acpi_processor *pr,
stash_composite_state(curr_level, flpi);
flat_state_cnt++;
flpi++;
if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER)
break;
}
}
}
@@ -1273,16 +1267,15 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
int acpi_processor_hotplug(struct acpi_processor *pr)
{
struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
int ret = 0;
struct cpuidle_device *dev;
if (disabled_by_idle_boot_param())
return 0;
if (!pr->flags.power_setup_done)
if (!pr->flags.power_setup_done || !dev)
return -ENODEV;
dev = per_cpu(acpi_cpuidle_device, pr->id);
cpuidle_pause_and_lock();
cpuidle_disable_device(dev);
ret = acpi_processor_get_power_info(pr);
@@ -1314,37 +1307,42 @@ int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
*/
if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
/* Protect against cpu-hotplug */
cpus_read_lock();
/* Unregister cpuidle device of all CPUs */
cpuidle_pause_and_lock();
/* Disable all cpuidle devices */
for_each_online_cpu(cpu) {
_pr = per_cpu(processors, cpu);
if (!_pr || !_pr->flags.power_setup_done)
continue;
for_each_possible_cpu(cpu) {
dev = per_cpu(acpi_cpuidle_device, cpu);
cpuidle_disable_device(dev);
}
/* Populate Updated C-state information */
acpi_processor_get_power_info(pr);
acpi_processor_setup_cpuidle_states(pr);
/* Enable all cpuidle devices */
for_each_online_cpu(cpu) {
_pr = per_cpu(processors, cpu);
if (!_pr || !_pr->flags.power_setup_done)
if (!_pr || !_pr->flags.power || !dev)
continue;
acpi_processor_get_power_info(_pr);
if (_pr->flags.power) {
dev = per_cpu(acpi_cpuidle_device, cpu);
acpi_processor_setup_cpuidle_dev(_pr, dev);
cpuidle_enable_device(dev);
}
cpuidle_unregister_device_no_lock(dev);
kfree(dev);
_pr->flags.power = 0;
}
cpuidle_resume_and_unlock();
/*
* Unregister ACPI idle driver, reinitialize ACPI idle states
* and register ACPI idle driver again.
*/
acpi_processor_unregister_idle_driver();
acpi_processor_register_idle_driver();
/*
* Reinitialize power information of all CPUs and re-register
* all cpuidle devices. Now idle states is ok to use, can enable
* cpuidle of each CPU safely one by one.
*/
for_each_possible_cpu(cpu) {
_pr = per_cpu(processors, cpu);
if (!_pr)
continue;
acpi_processor_power_init(_pr);
}
cpus_read_unlock();
}
@@ -1357,6 +1355,8 @@ void acpi_processor_register_idle_driver(void)
int ret = -ENODEV;
int cpu;
acpi_processor_update_max_cstate();
/*
* ACPI idle driver is used by all possible CPUs.
* Use the processor power info of one in them to set up idle states.
@@ -1368,7 +1368,6 @@ void acpi_processor_register_idle_driver(void)
if (!pr)
continue;
acpi_processor_cstate_first_run_checks();
ret = acpi_processor_get_power_info(pr);
if (!ret) {
pr->flags.power_setup_done = 1;
@@ -1384,6 +1383,7 @@ void acpi_processor_register_idle_driver(void)
ret = cpuidle_register_driver(&acpi_idle_driver);
if (ret) {
pr->flags.power_setup_done = 0;
pr_debug("register %s failed.\n", acpi_idle_driver.name);
return;
}
@@ -1392,7 +1392,16 @@ void acpi_processor_register_idle_driver(void)
void acpi_processor_unregister_idle_driver(void)
{
struct acpi_processor *pr;
int cpu;
cpuidle_unregister_driver(&acpi_idle_driver);
for_each_possible_cpu(cpu) {
pr = per_cpu(processors, cpu);
if (!pr)
continue;
pr->flags.power_setup_done = 0;
}
}
void acpi_processor_power_init(struct acpi_processor *pr)
@@ -1409,8 +1418,6 @@ void acpi_processor_power_init(struct acpi_processor *pr)
if (disabled_by_idle_boot_param())
return;
acpi_processor_cstate_first_run_checks();
if (!acpi_processor_get_power_info(pr))
pr->flags.power_setup_done = 1;
@@ -1431,6 +1438,7 @@ void acpi_processor_power_init(struct acpi_processor *pr)
*/
if (cpuidle_register_device(dev)) {
per_cpu(acpi_cpuidle_device, pr->id) = NULL;
pr->flags.power_setup_done = 0;
kfree(dev);
}
}
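
A side note on the combine_lpi_states() change above: the single scnprintf()
composes the description in one bounded, always NUL-terminated write instead
of a strscpy() followed by two strlcat() calls. The pattern in isolation
(function and parameter names are illustrative):

static void example_compose_desc(char *dst, size_t len,
                                 const char *local, const char *parent)
{
        /* One call, one truncation point, no repeated length bookkeeping. */
        scnprintf(dst, len, "%s+%s", local, parent);
}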


@@ -50,7 +50,8 @@ struct cppc_freq_invariance {
static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;
static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
static int cppc_perf_from_fbctrs(u64 reference_perf,
struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1);
/**
@@ -70,7 +71,7 @@ static void __cppc_scale_freq_tick(struct cppc_freq_invariance *cppc_fi)
struct cppc_perf_fb_ctrs fb_ctrs = {0};
struct cppc_cpudata *cpu_data;
unsigned long local_freq_scale;
u64 perf;
u64 perf, ref_perf;
cpu_data = cppc_fi->cpu_data;
@@ -79,7 +80,9 @@ static void __cppc_scale_freq_tick(struct cppc_freq_invariance *cppc_fi)
return;
}
perf = cppc_perf_from_fbctrs(&cppc_fi->prev_perf_fb_ctrs, &fb_ctrs);
ref_perf = cpu_data->perf_caps.reference_perf;
perf = cppc_perf_from_fbctrs(ref_perf,
&cppc_fi->prev_perf_fb_ctrs, &fb_ctrs);
if (!perf)
return;
@@ -287,6 +290,21 @@ static inline void cppc_freq_invariance_exit(void)
}
#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
static void cppc_cpufreq_update_perf_limits(struct cppc_cpudata *cpu_data,
struct cpufreq_policy *policy)
{
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
u32 min_perf, max_perf;
min_perf = cppc_khz_to_perf(caps, policy->min);
max_perf = cppc_khz_to_perf(caps, policy->max);
cpu_data->perf_ctrls.min_perf =
clamp_t(u32, min_perf, caps->lowest_perf, caps->highest_perf);
cpu_data->perf_ctrls.max_perf =
clamp_t(u32, max_perf, caps->lowest_perf, caps->highest_perf);
}
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
@@ -298,6 +316,8 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
cpu_data->perf_ctrls.desired_perf =
cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
cppc_cpufreq_update_perf_limits(cpu_data, policy);
freqs.old = policy->cur;
freqs.new = target_freq;
@@ -322,8 +342,9 @@ static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy,
desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
cpu_data->perf_ctrls.desired_perf = desired_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
cppc_cpufreq_update_perf_limits(cpu_data, policy);
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
if (ret) {
pr_debug("Failed to set target on CPU:%d. ret:%d\n",
cpu, ret);
@@ -594,6 +615,12 @@ static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
goto free_mask;
}
ret = cppc_get_perf(cpu, &cpu_data->perf_ctrls);
if (ret) {
pr_debug("Err reading CPU%d perf ctrls: ret:%d\n", cpu, ret);
goto free_mask;
}
return cpu_data;
free_mask:
@@ -723,13 +750,11 @@ static inline u64 get_delta(u64 t1, u64 t0)
return (u32)t1 - (u32)t0;
}
static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
static int cppc_perf_from_fbctrs(u64 reference_perf,
struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
u64 delta_reference, delta_delivered;
u64 reference_perf;
reference_perf = fb_ctrs_t0->reference_perf;
delta_reference = get_delta(fb_ctrs_t1->reference,
fb_ctrs_t0->reference);
@@ -766,7 +791,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
struct cppc_cpudata *cpu_data;
u64 delivered_perf;
u64 delivered_perf, reference_perf;
int ret;
if (!policy)
@@ -783,7 +808,9 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
return 0;
}
delivered_perf = cppc_perf_from_fbctrs(&fb_ctrs_t0, &fb_ctrs_t1);
reference_perf = cpu_data->perf_caps.reference_perf;
delivered_perf = cppc_perf_from_fbctrs(reference_perf,
&fb_ctrs_t0, &fb_ctrs_t1);
if (!delivered_perf)
goto out_invalid_counters;
@@ -849,6 +876,7 @@ static ssize_t show_auto_select(struct cpufreq_policy *policy, char *buf)
static ssize_t store_auto_select(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
bool val;
int ret;
@@ -860,6 +888,29 @@ static ssize_t store_auto_select(struct cpufreq_policy *policy,
if (ret)
return ret;
cpu_data->perf_ctrls.auto_sel = val;
if (val) {
u32 old_min_perf = cpu_data->perf_ctrls.min_perf;
u32 old_max_perf = cpu_data->perf_ctrls.max_perf;
/*
* When enabling autonomous selection, program MIN_PERF and
* MAX_PERF from current policy limits so that the platform
* uses the correct performance bounds immediately.
*/
cppc_cpufreq_update_perf_limits(cpu_data, policy);
ret = cppc_set_perf(policy->cpu, &cpu_data->perf_ctrls);
if (ret) {
cpu_data->perf_ctrls.min_perf = old_min_perf;
cpu_data->perf_ctrls.max_perf = old_max_perf;
cppc_set_auto_sel(policy->cpu, false);
cpu_data->perf_ctrls.auto_sel = false;
return ret;
}
}
return count;
}
@@ -910,19 +961,48 @@ static ssize_t store_##_name(struct cpufreq_policy *policy, \
CPPC_CPUFREQ_ATTR_RW_U64(auto_act_window, cppc_get_auto_act_window,
cppc_set_auto_act_window)
CPPC_CPUFREQ_ATTR_RW_U64(energy_performance_preference_val,
cppc_get_epp_perf, cppc_set_epp)
static ssize_t
show_energy_performance_preference_val(struct cpufreq_policy *policy, char *buf)
{
return cppc_cpufreq_sysfs_show_u64(policy->cpu, cppc_get_epp_perf, buf);
}
static ssize_t
store_energy_performance_preference_val(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
u64 val;
int ret;
ret = kstrtou64(buf, 0, &val);
if (ret)
return ret;
ret = cppc_set_epp(policy->cpu, val);
if (ret)
return ret;
cpu_data->perf_ctrls.energy_perf = val;
return count;
}
CPPC_CPUFREQ_ATTR_RW_U64(perf_limited, cppc_get_perf_limited,
cppc_set_perf_limited)
cpufreq_freq_attr_ro(freqdomain_cpus);
cpufreq_freq_attr_rw(auto_select);
cpufreq_freq_attr_rw(auto_act_window);
cpufreq_freq_attr_rw(energy_performance_preference_val);
cpufreq_freq_attr_rw(perf_limited);
static struct freq_attr *cppc_cpufreq_attr[] = {
&freqdomain_cpus,
&auto_select,
&auto_act_window,
&energy_performance_preference_val,
&perf_limited,
NULL,
};
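
With reference_perf now carried in cppc_perf_caps instead of being read from
the feedback counters on every sample, the math in cppc_perf_from_fbctrs()
remains the standard CPPC counter ratio. A hedged restatement as a
standalone helper (the real driver additionally handles 32-bit counter
wraparound via get_delta()):

#include <linux/math64.h>

/*
 * Delivered performance over a sampling window: the reference performance
 * scaled by the ratio of the delivered and reference counter deltas.
 */
static u64 example_delivered_perf(u64 reference_perf,
                                  u64 delivered_t0, u64 delivered_t1,
                                  u64 reference_t0, u64 reference_t1)
{
        u64 delta_delivered = delivered_t1 - delivered_t0;
        u64 delta_reference = reference_t1 - reference_t0;

        if (!delta_reference)   /* counters did not advance */
                return 0;

        return div64_u64(reference_perf * delta_delivered, delta_reference);
}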


@@ -714,6 +714,23 @@ int cpuidle_register_device(struct cpuidle_device *dev)
EXPORT_SYMBOL_GPL(cpuidle_register_device);
void cpuidle_unregister_device_no_lock(struct cpuidle_device *dev)
{
if (!dev || dev->registered == 0)
return;
lockdep_assert_held(&cpuidle_lock);
cpuidle_disable_device(dev);
cpuidle_remove_sysfs(dev);
__cpuidle_unregister_device(dev);
cpuidle_coupled_unregister_device(dev);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_device_no_lock);
/**
* cpuidle_unregister_device - unregisters a CPU's idle PM feature
* @dev: the cpu
@@ -724,18 +741,9 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
return;
cpuidle_pause_and_lock();
cpuidle_disable_device(dev);
cpuidle_remove_sysfs(dev);
__cpuidle_unregister_device(dev);
cpuidle_coupled_unregister_device(dev);
cpuidle_unregister_device_no_lock(dev);
cpuidle_resume_and_unlock();
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
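
The point of the no-lock variant is batching: take the cpuidle lock once and
tear down many devices under it, as the ACPI idle driver above now does. A
condensed sketch of the pattern (the per-CPU array follows the ACPI driver's
naming):

cpuidle_pause_and_lock();
for_each_possible_cpu(cpu) {
        struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, cpu);

        if (dev)
                cpuidle_unregister_device_no_lock(dev);
}
cpuidle_resume_and_unlock();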
/**


@@ -42,6 +42,11 @@
#define CPPC_EPP_PERFORMANCE_PREF 0x00
#define CPPC_EPP_ENERGY_EFFICIENCY_PREF 0xFF
#define CPPC_PERF_LIMITED_DESIRED_EXCURSION BIT(0)
#define CPPC_PERF_LIMITED_MINIMUM_EXCURSION BIT(1)
#define CPPC_PERF_LIMITED_MASK (CPPC_PERF_LIMITED_DESIRED_EXCURSION | \
CPPC_PERF_LIMITED_MINIMUM_EXCURSION)
/* Each register has the following format. */
struct cpc_reg {
u8 descriptor;
@@ -116,6 +121,7 @@ struct cppc_perf_caps {
u32 guaranteed_perf;
u32 highest_perf;
u32 nominal_perf;
u32 reference_perf;
u32 lowest_perf;
u32 lowest_nonlinear_perf;
u32 lowest_freq;
@@ -133,7 +139,6 @@ struct cppc_perf_ctrls {
struct cppc_perf_fb_ctrs {
u64 reference;
u64 delivered;
u64 reference_perf;
u64 wraparound_time;
};
@@ -151,6 +156,7 @@ extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
extern int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf);
extern int cppc_get_highest_perf(int cpunum, u64 *highest_perf);
extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
extern int cppc_get_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_set_enable(int cpu, bool enable);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
@@ -173,6 +179,8 @@ extern int cppc_get_auto_act_window(int cpu, u64 *auto_act_window);
extern int cppc_set_auto_act_window(int cpu, u64 auto_act_window);
extern int cppc_get_auto_sel(int cpu, bool *enable);
extern int cppc_set_auto_sel(int cpu, bool enable);
extern int cppc_get_perf_limited(int cpu, u64 *perf_limited);
extern int cppc_set_perf_limited(int cpu, u64 bits_to_clear);
extern int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf);
extern int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator);
extern int amd_detect_prefcore(bool *detected);
@@ -193,6 +201,10 @@ static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_
{
return -EOPNOTSUPP;
}
static inline int cppc_get_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
return -EOPNOTSUPP;
}
static inline int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
return -EOPNOTSUPP;
@@ -265,6 +277,14 @@ static inline int cppc_set_auto_sel(int cpu, bool enable)
{
return -EOPNOTSUPP;
}
static inline int cppc_get_perf_limited(int cpu, u64 *perf_limited)
{
return -EOPNOTSUPP;
}
static inline int cppc_set_perf_limited(int cpu, u64 bits_to_clear)
{
return -EOPNOTSUPP;
}
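
Because both these !CONFIG_ACPI_CPPC_LIB stubs and the real implementations
return -EOPNOTSUPP when the feature is unavailable, callers can probe support
uniformly; a hedged sketch:

u64 val;
int ret = cppc_get_perf_limited(cpu, &val);

if (ret == -EOPNOTSUPP)
        pr_debug("CPU%d: PERF_LIMITED not available\n", cpu);
else if (!ret && val)
        pr_debug("CPU%d: performance was limited (0x%llx)\n", cpu, val);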
static inline int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
{
return -ENODEV;


@@ -188,6 +188,7 @@ extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device_no_lock(struct cpuidle_device *dev);
extern int cpuidle_register(struct cpuidle_driver *drv,
const struct cpumask *const coupled_cpus);
extern void cpuidle_unregister(struct cpuidle_driver *drv);
@@ -226,6 +227,7 @@ static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
static inline int cpuidle_register_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
static inline void cpuidle_unregister_device_no_lock(struct cpuidle_device *dev) {}
static inline int cpuidle_register(struct cpuidle_driver *drv,
const struct cpumask *const coupled_cpus)
{return -ENODEV; }