drm/amd/pm: Use common helper for sienna cichlid dpm table

Use the common helper function to print Sienna Cichlid DPM tables to sysfs.

Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Lijo Lazar
2025-11-27 15:18:23 +05:30
committed by Alex Deucher
parent 5f77e0b11d
commit efb4febd52

View File

@@ -1278,94 +1278,45 @@ static int sienna_cichlid_emit_clk_levels(struct smu_context *smu,
struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
OverDriveTable_t *od_table =
(OverDriveTable_t *)table_context->overdrive_table;
int i, size = *offset, ret = 0, start_offset = *offset;
uint32_t cur_value = 0, value = 0, count = 0;
int size = *offset, ret = 0, start_offset = *offset;
uint32_t cur_value = 0;
struct smu_dpm_table *single_dpm_table = NULL;
struct smu_pcie_table *pcie_table;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
uint32_t gen_speed, lane_width;
uint32_t min_value, max_value;
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
break;
case SMU_SOCCLK:
single_dpm_table = &(dpm_context->dpm_tables.soc_table);
break;
case SMU_MCLK:
case SMU_UCLK:
single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
break;
case SMU_FCLK:
single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
break;
case SMU_VCLK:
case SMU_VCLK1:
single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
break;
case SMU_DCLK:
case SMU_DCLK1:
single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
break;
case SMU_DCEFCLK:
ret = sienna_cichlid_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
if (ret)
return ret;
ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
if (ret)
return ret;
if (!sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) {
for (i = 0; i < count; i++) {
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
if (ret)
return ret;
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
}
} else {
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
if (ret)
return ret;
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
if (ret)
return ret;
freq_values[1] = cur_value;
mark_index = cur_value == freq_values[0] ? 0 :
cur_value == freq_values[2] ? 2 : 1;
count = 3;
if (mark_index != 1) {
count = 2;
freq_values[1] = freq_values[2];
}
for (i = 0; i < count; i++) {
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i],
cur_value == freq_values[i] ? "*" : "");
}
}
single_dpm_table = &(dpm_context->dpm_tables.dcef_table);
break;
case SMU_PCIE:
gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
pcie_table = &dpm_context->dpm_tables.pcie_table;
for (i = 0; i < pcie_table->lclk_levels; i++)
size += sysfs_emit_at(
buf, size, "%d: %s %s %dMhz %s\n", i,
(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
(pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," :
"",
(pcie_table->pcie_lane[i] == 1) ? "x1" :
(pcie_table->pcie_lane[i] == 2) ? "x2" :
(pcie_table->pcie_lane[i] == 3) ? "x4" :
(pcie_table->pcie_lane[i] == 4) ? "x8" :
(pcie_table->pcie_lane[i] == 5) ? "x12" :
(pcie_table->pcie_lane[i] == 6) ? "x16" :
"",
pcie_table->lclk_freq[i],
(gen_speed == pcie_table->pcie_gen[i]) &&
(lane_width ==
pcie_table->pcie_lane[i]) ?
"*" :
"");
break;
return smu_cmn_print_pcie_levels(smu, pcie_table, gen_speed,
lane_width, buf, offset);
case SMU_OD_SCLK:
if (!smu->od_enabled || !od_table || !od_settings)
break;
@@ -1434,6 +1385,15 @@ static int sienna_cichlid_emit_clk_levels(struct smu_context *smu,
break;
}
if (single_dpm_table) {
ret = sienna_cichlid_get_current_clk_freq_by_table(
smu, clk_type, &cur_value);
if (ret)
return ret;
return smu_cmn_print_dpm_clk_levels(smu, single_dpm_table,
cur_value, buf, offset);
}
*offset += size - start_offset;
return 0;