mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Merge tag 'amd-drm-fixes-6.18-2025-11-20' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes
amd-drm-fixes-6.18-2025-11-20:

amdgpu:
- DTBCLK gating fix
- EDID fetching retry improvements
- HDMI HPD debounce filtering
- DCN 2.0 cursor fix
- DP MST PBN fix
- VPE fix
- GC 11 fix
- PRT fix
- MMIO remap page fix
- SR-IOV fix

radeon:
- Fence deadlock fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patch.msgid.link/20251120164110.1077973-1-alexander.deucher@amd.com
@@ -3414,10 +3414,11 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
 			continue;
-		/* skip CG for VCE/UVD, it's handled specially */
+		/* skip CG for VCE/UVD/VPE, it's handled specially */
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
 			/* enable powergating to save power */
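For context, a hedged standalone mock of the skip-list pattern this hunk extends. The enum and helper below are simplified stand-ins invented for illustration, not amdgpu API; they assume only what the updated comment states, namely that VCE/UVD/VCN/JPEG, and now VPE, handle their own powergating and must be skipped by the generic pass.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the AMD_IP_BLOCK_TYPE_* enum */
enum ip_type { IP_GFX, IP_SDMA, IP_UVD, IP_VCE, IP_VCN, IP_JPEG, IP_VPE };

/* Blocks whose powergating is handled specially, mirroring the hunk above */
static bool pg_handled_specially(enum ip_type t)
{
	return t == IP_UVD || t == IP_VCE || t == IP_VCN ||
	       t == IP_JPEG || t == IP_VPE; /* VPE is the newly skipped type */
}

int main(void)
{
	enum ip_type blocks[] = { IP_GFX, IP_VPE, IP_JPEG, IP_SDMA };
	unsigned int i;

	for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++)
		printf("block %u: %s\n", i,
		       pg_handled_specially(blocks[i]) ?
		       "skip generic powergating" : "apply generic powergating");
	return 0;
}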
@@ -1372,7 +1372,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
 		     mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
 		flags |= AMDGPU_PTE_SYSTEM;
 
-		if (ttm->caching == ttm_cached)
+		if (ttm && ttm->caching == ttm_cached)
 			flags |= AMDGPU_PTE_SNOOPED;
 	}
@@ -2078,7 +2078,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 		struct amdgpu_bo *bo = before->bo_va->base.bo;
 
 		amdgpu_vm_it_insert(before, &vm->va);
-		if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
+		if (before->flags & AMDGPU_VM_PAGE_PRT)
 			amdgpu_vm_prt_get(adev);
 
 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
@@ -2093,7 +2093,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 		struct amdgpu_bo *bo = after->bo_va->base.bo;
 
 		amdgpu_vm_it_insert(after, &vm->va);
-		if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
+		if (after->flags & AMDGPU_VM_PAGE_PRT)
 			amdgpu_vm_prt_get(adev);
 
 		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
@@ -5872,9 +5872,9 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 		if (flags & AMDGPU_IB_PREEMPTED)
 			control |= INDIRECT_BUFFER_PRE_RESUME(1);
 
-		if (vmid)
+		if (vmid && !ring->adev->gfx.rs64_enable)
 			gfx_v11_0_ring_emit_de_meta(ring,
-				      (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
+				      !amdgpu_sriov_vf(ring->adev) && (flags & AMDGPU_IB_PREEMPTED));
 	}
 
 	amdgpu_ring_write(ring, header);
@@ -141,7 +141,7 @@ static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
 	adev->vcn.supported_reset =
 		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
 
-	if (amdgpu_dpm_reset_vcn_is_supported(adev))
+	if (amdgpu_dpm_reset_vcn_is_supported(adev) && !amdgpu_sriov_vf(adev))
 		adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
 
 	return 0;
@@ -122,7 +122,9 @@ static int vcn_v5_0_1_late_init(struct amdgpu_ip_block *ip_block)
 
 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
 	case IP_VERSION(13, 0, 12):
-		if ((adev->psp.sos.fw_version >= 0x00450025) && amdgpu_dpm_reset_vcn_is_supported(adev))
+		if ((adev->psp.sos.fw_version >= 0x00450025) &&
+		    amdgpu_dpm_reset_vcn_is_supported(adev) &&
+		    !amdgpu_sriov_vf(adev))
 			adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
 		break;
 	default:
@@ -3859,6 +3859,97 @@ void amdgpu_dm_update_connector_after_detect(
 	update_subconnector_property(aconnector);
 }
 
+static bool are_sinks_equal(const struct dc_sink *sink1, const struct dc_sink *sink2)
+{
+	if (!sink1 || !sink2)
+		return false;
+	if (sink1->sink_signal != sink2->sink_signal)
+		return false;
+
+	if (sink1->dc_edid.length != sink2->dc_edid.length)
+		return false;
+
+	if (memcmp(sink1->dc_edid.raw_edid, sink2->dc_edid.raw_edid,
+		   sink1->dc_edid.length) != 0)
+		return false;
+
+	return true;
+}
+
+/**
+ * DOC: hdmi_hpd_debounce_work
+ *
+ * HDMI HPD debounce delay in milliseconds. When an HDMI display toggles HPD
+ * (such as during power save transitions), this delay determines how long to
+ * wait before processing the HPD event. This allows distinguishing between a
+ * physical unplug (>hdmi_hpd_debounce_delay) and a spontaneous RX HPD toggle
+ * (<hdmi_hpd_debounce_delay).
+ *
+ * If the toggle is shorter than this delay, the driver compares sink
+ * capabilities and permits a hotplug event only if they changed.
+ *
+ * The default value of 1500ms was chosen based on experimental testing with
+ * various monitors that exhibit spontaneous HPD toggling behavior.
+ */
+static void hdmi_hpd_debounce_work(struct work_struct *work)
+{
+	struct amdgpu_dm_connector *aconnector =
+		container_of(to_delayed_work(work), struct amdgpu_dm_connector,
+			     hdmi_hpd_debounce_work);
+	struct drm_connector *connector = &aconnector->base;
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = drm_to_adev(dev);
+	struct dc *dc = aconnector->dc_link->ctx->dc;
+	bool fake_reconnect = false;
+	bool reallow_idle = false;
+	bool ret = false;
+
+	guard(mutex)(&aconnector->hpd_lock);
+
+	/* Re-detect the display */
+	scoped_guard(mutex, &adev->dm.dc_lock) {
+		if (dc->caps.ips_support && dc->ctx->dmub_srv->idle_allowed) {
+			dc_allow_idle_optimizations(dc, false);
+			reallow_idle = true;
+		}
+		ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+	}
+
+	if (ret) {
+		/* Apply workaround delay for certain panels */
+		apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
+		/* Compare sinks to determine if this was a spontaneous HPD toggle */
+		if (are_sinks_equal(aconnector->dc_link->local_sink, aconnector->hdmi_prev_sink)) {
+			/* Sinks match - this was a spontaneous HDMI HPD toggle. */
+			drm_dbg_kms(dev, "HDMI HPD: Sink unchanged after debounce, internal re-enable\n");
+			fake_reconnect = true;
+		}
+
+		/* Update connector state */
+		amdgpu_dm_update_connector_after_detect(aconnector);
+
+		drm_modeset_lock_all(dev);
+		dm_restore_drm_connector_state(dev, connector);
+		drm_modeset_unlock_all(dev);
+
+		/* Only notify OS if sink actually changed */
+		if (!fake_reconnect && aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+			drm_kms_helper_hotplug_event(dev);
+	}
+
+	/* Release the cached sink reference */
+	if (aconnector->hdmi_prev_sink) {
+		dc_sink_release(aconnector->hdmi_prev_sink);
+		aconnector->hdmi_prev_sink = NULL;
+	}
+
+	scoped_guard(mutex, &adev->dm.dc_lock) {
+		if (reallow_idle && dc->caps.ips_support)
+			dc_allow_idle_optimizations(dc, true);
+	}
+}
+
 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 {
 	struct drm_connector *connector = &aconnector->base;
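To make the debounce policy described in the DOC comment above concrete, here is a hedged userspace sketch. mock_sink, sinks_equal() and hpd_debounce_decide() are simplified stand-ins invented for illustration, not the DC API: a toggle longer than the debounce window counts as a real unplug, while a shorter one is reported only if the re-detected sink's signal type or EDID changed.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct mock_sink {                     /* stand-in for struct dc_sink */
	int signal;
	unsigned int edid_len;
	unsigned char edid[256];
};

static bool sinks_equal(const struct mock_sink *a, const struct mock_sink *b)
{
	if (!a || !b)
		return false;
	if (a->signal != b->signal || a->edid_len != b->edid_len)
		return false;
	return memcmp(a->edid, b->edid, a->edid_len) == 0;
}

/* Returns true when userspace should see a hotplug event. */
static bool hpd_debounce_decide(unsigned int low_ms, unsigned int debounce_ms,
				const struct mock_sink *prev,
				const struct mock_sink *redetected)
{
	if (low_ms >= debounce_ms)
		return true;                   /* HPD stayed low: real unplug */
	return !sinks_equal(prev, redetected); /* short toggle: only if caps changed */
}

int main(void)
{
	struct mock_sink s = { .signal = 1, .edid_len = 4, .edid = "ABCD" };

	/* 200ms toggle, identical sink: swallowed as a spontaneous RX toggle */
	printf("%d\n", hpd_debounce_decide(200, 1500, &s, &s));    /* prints 0 */
	/* HPD low for 2000ms, past the 1500ms window: report the unplug */
	printf("%d\n", hpd_debounce_decide(2000, 1500, &s, NULL)); /* prints 1 */
	return 0;
}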
@@ -3868,6 +3959,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
 	struct dc *dc = aconnector->dc_link->ctx->dc;
 	bool ret = false;
+	bool debounce_required = false;
 
 	if (adev->dm.disable_hpd_irq)
 		return;
@@ -3890,6 +3982,14 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 	if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
 		drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
 
+	/*
+	 * Check for HDMI disconnect with debounce enabled.
+	 */
+	debounce_required = (aconnector->hdmi_hpd_debounce_delay_ms > 0 &&
+			     dc_is_hdmi_signal(aconnector->dc_link->connector_signal) &&
+			     new_connection_type == dc_connection_none &&
+			     aconnector->dc_link->local_sink != NULL);
+
 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
 		emulated_link_detect(aconnector->dc_link);
 
@@ -3899,7 +3999,34 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 
 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
 			drm_kms_helper_connector_hotplug_event(connector);
+	} else if (debounce_required) {
+		/*
+		 * HDMI disconnect detected - schedule delayed work instead of
+		 * processing immediately. This allows us to distinguish spurious
+		 * HDMI signals from physical unplugs.
+		 */
+		drm_dbg_kms(dev, "HDMI HPD: Disconnect detected, scheduling debounce work (%u ms)\n",
+			    aconnector->hdmi_hpd_debounce_delay_ms);
+
+		/* Cache the current sink for later comparison */
+		if (aconnector->hdmi_prev_sink)
+			dc_sink_release(aconnector->hdmi_prev_sink);
+		aconnector->hdmi_prev_sink = aconnector->dc_link->local_sink;
+		if (aconnector->hdmi_prev_sink)
+			dc_sink_retain(aconnector->hdmi_prev_sink);
+
+		/* Schedule delayed detection. */
+		if (mod_delayed_work(system_wq,
+				     &aconnector->hdmi_hpd_debounce_work,
+				     msecs_to_jiffies(aconnector->hdmi_hpd_debounce_delay_ms)))
+			drm_dbg_kms(dev, "HDMI HPD: Re-scheduled debounce work\n");
+
 	} else {
+
+		/* If the aconnector->hdmi_hpd_debounce_work is scheduled, exit early */
+		if (delayed_work_pending(&aconnector->hdmi_hpd_debounce_work))
+			return;
+
 		scoped_guard(mutex, &adev->dm.dc_lock) {
 			dc_exit_ips_for_hw_access(dc);
 			ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
@@ -7388,6 +7515,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
 	if (aconnector->mst_mgr.dev)
 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
 
+	/* Cancel and flush any pending HDMI HPD debounce work */
+	cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work);
+	if (aconnector->hdmi_prev_sink) {
+		dc_sink_release(aconnector->hdmi_prev_sink);
+		aconnector->hdmi_prev_sink = NULL;
+	}
+
 	if (aconnector->bl_idx != -1) {
 		backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
 		dm->backlight_dev[aconnector->bl_idx] = NULL;
@@ -8549,6 +8683,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 	mutex_init(&aconnector->hpd_lock);
 	mutex_init(&aconnector->handle_mst_msg_ready);
 
+	aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS;
+	INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
+	aconnector->hdmi_prev_sink = NULL;
+
 	/*
 	 * configure support HPD hot plug connector_>polled default value is 0
 	 * which means HPD hot plug not supported
@@ -59,6 +59,7 @@
 
 #define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)
 
+#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500
 /*
 #include "include/amdgpu_dal_power_if.h"
 #include "amdgpu_dm_irq.h"
@@ -819,6 +820,11 @@ struct amdgpu_dm_connector {
 	bool pack_sdp_v1_3;
 	enum adaptive_sync_type as_type;
 	struct amdgpu_hdmi_vsdb_info vsdb_info;
+
+	/* HDMI HPD debounce support */
+	unsigned int hdmi_hpd_debounce_delay_ms;
+	struct delayed_work hdmi_hpd_debounce_work;
+	struct dc_sink *hdmi_prev_sink;
 };
 
 static inline void amdgpu_dm_set_mst_status(uint8_t *status,
@@ -884,26 +884,28 @@ struct dsc_mst_fairness_params {
 };
 
 #if defined(CONFIG_DRM_AMD_DC_FP)
-static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn)
 {
-	u8 link_coding_cap;
-	uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+	uint64_t effective_kbps = (uint64_t)kbps;
 
-	link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
-	if (link_coding_cap == DP_128b_132b_ENCODING)
-		fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+	if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps
+		effective_kbps *= 1006;
+		effective_kbps = div_u64(effective_kbps, 1000);
+	}
 
-	return fec_overhead_multiplier_x1000;
+	return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000));
 }
 
-static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
+static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin)
 {
-	u64 peak_kbps = kbps;
+	uint64_t pbn_effective = (uint64_t)pbn;
 
-	peak_kbps *= 1006;
-	peak_kbps *= fec_overhead_multiplier_x1000;
-	peak_kbps = div_u64(peak_kbps, 1000 * 1000);
-	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
+	if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn
+		pbn_effective *= (1000000 / PEAK_FACTOR_X1000);
+	else
+		pbn_effective *= 1000;
+
+	return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64);
 }
 
 static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
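As a worked check of the arithmetic above: one PBN unit is 54/64 MBps, so PBN = ceil(kbps * 64 / (54 * 8 * 1000)), with an optional 0.6% (1006/1000) peak margin folded into the kbps first. The sketch below is plain userspace math mirroring the new helper, not the driver function itself.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_U64(n, d) (((n) + (d) - 1) / (d))

static uint64_t kbps_to_pbn_demo(uint64_t kbps, int is_peak)
{
	if (is_peak)
		kbps = kbps * 1006 / 1000;  /* add the 0.6% peak margin */
	return DIV_ROUND_UP_U64(kbps * 64, 54 * 8 * 1000);
}

int main(void)
{
	/* e.g. a 1,000,000 kbps (1 Gbps) stream payload */
	printf("pbn       = %llu\n",
	       (unsigned long long)kbps_to_pbn_demo(1000000, 0)); /* 149 */
	printf("pbn(peak) = %llu\n",
	       (unsigned long long)kbps_to_pbn_demo(1000000, 1)); /* 150 */
	return 0;
}

The reverse helper pbn_to_kbps() multiplies back by 1000 * 8 * 54 / 64, using the 994/1000 factor instead of 1000 when the margin is to be deducted from a peak PBN value.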
@@ -974,7 +976,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
 	dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
 	dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
 
-	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
+	kbps = pbn_to_kbps(pbn, false);
 	dc_dsc_compute_config(
 		param.sink->ctx->dc->res_pool->dscs[0],
 		&param.sink->dsc_caps.dsc_dec_caps,
@@ -1003,12 +1005,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
 	int link_timeslots_used;
 	int fair_pbn_alloc;
 	int ret = 0;
-	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
 
 	for (i = 0; i < count; i++) {
 		if (vars[i + k].dsc_enabled) {
 			initial_slack[i] =
-				kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
+				kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn;
 			bpp_increased[i] = false;
 			remaining_to_increase += 1;
 		} else {
@@ -1104,7 +1105,6 @@ static int try_disable_dsc(struct drm_atomic_state *state,
 	int next_index;
 	int remaining_to_try = 0;
 	int ret;
-	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
 	int var_pbn;
 
 	for (i = 0; i < count; i++) {
@@ -1137,7 +1137,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
 
 	DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
 	var_pbn = vars[next_index].pbn;
-	vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+	vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true);
 	ret = drm_dp_atomic_find_time_slots(state,
 					    params[next_index].port->mgr,
 					    params[next_index].port,
@@ -1197,7 +1197,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 	int count = 0;
 	int i, k, ret;
 	bool debugfs_overwrite = false;
-	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
 	struct drm_connector_state *new_conn_state;
 
 	memset(params, 0, sizeof(params));
@@ -1278,7 +1277,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 	DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
 	for (i = 0; i < count; i++) {
 		vars[i + k].aconnector = params[i].aconnector;
-		vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+		vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
 		vars[i + k].dsc_enabled = false;
 		vars[i + k].bpp_x16 = 0;
 		ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1300,7 +1299,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 	DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
 	for (i = 0; i < count; i++) {
 		if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
-			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
+			vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false);
 			vars[i + k].dsc_enabled = true;
 			vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
 			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1308,7 +1307,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 			if (ret < 0)
 				return ret;
 		} else {
-			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+			vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
 			vars[i + k].dsc_enabled = false;
 			vars[i + k].bpp_x16 = 0;
 			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1763,18 +1762,6 @@ int pre_validate_dsc(struct drm_atomic_state *state,
 	return ret;
 }
 
-static uint32_t kbps_from_pbn(unsigned int pbn)
-{
-	uint64_t kbps = (uint64_t)pbn;
-
-	kbps *= (1000000 / PEAK_FACTOR_X1000);
-	kbps *= 8;
-	kbps *= 54;
-	kbps /= 64;
-
-	return (uint32_t)kbps;
-}
-
 static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
 					  struct dc_dsc_bw_range *bw_range)
 {
@@ -1873,7 +1860,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
 			dc_link_get_highest_encoding_format(stream->link));
 	cur_link_settings = stream->link->verified_link_cap;
 	root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
-	virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
+	virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true);
 
 	/* pick the end to end bw bottleneck */
 	end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
@@ -1926,7 +1913,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
 	immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;
 
 	if (immediate_upstream_port) {
-		virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
+		virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true);
 		virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
 	} else {
 		/* For topology LCT 1 case - only one mstb*/
@@ -394,6 +394,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
 	display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
 	if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
 		new_clocks->ref_dtbclk_khz = 600000;
+	else if (!new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz > 590000)
+		new_clocks->ref_dtbclk_khz = 0;
 
 	/*
 	 * if it is safe to lower, but we are already in the lower state, we don't have to do anything
@@ -435,7 +437,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
 
 		actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT);
 
-		if (actual_dtbclk) {
+		if (actual_dtbclk > 590000) {
 			clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
 			clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
 		}
@@ -1411,7 +1411,7 @@ static void dccg35_set_dtbclk_dto(
 			__func__, params->otg_inst, params->pixclk_khz,
 			params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo);
 
-	} else {
+	} else if (!params->ref_dtbclk_khz && !req_dtbclk_khz) {
 		switch (params->otg_inst) {
 		case 0:
 			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0);
@@ -614,6 +614,14 @@ void dcn20_dpp_pg_control(
 		 *		DOMAIN11_PGFSM_PWR_STATUS, pwr_status,
 		 *		1, 1000);
 		 */
+
+		/* Force disable cursor on plane powerdown on DPP 5 using dpp_force_disable_cursor */
+		if (!power_on) {
+			struct dpp *dpp5 = hws->ctx->dc->res_pool->dpps[dpp_inst];
+
+			if (dpp5 && dpp5->funcs->dpp_force_disable_cursor)
+				dpp5->funcs->dpp_force_disable_cursor(dpp5);
+		}
 		break;
 	default:
 		BREAK_TO_DEBUGGER();
@@ -1691,7 +1691,7 @@ static bool retrieve_link_cap(struct dc_link *link)
 	union edp_configuration_cap edp_config_cap;
 	union dp_downstream_port_present ds_port = { 0 };
 	enum dc_status status = DC_ERROR_UNEXPECTED;
-	uint32_t read_dpcd_retry_cnt = 3;
+	uint32_t read_dpcd_retry_cnt = 20;
 	int i;
 	struct dp_sink_hw_fw_revision dp_hw_fw_revision;
 	const uint32_t post_oui_delay = 30; // 30ms
@@ -1734,12 +1734,13 @@ static bool retrieve_link_cap(struct dc_link *link)
 	}
 
 	dpcd_set_source_specific_data(link);
-	/* Sink may need to configure internals based on vendor, so allow some
-	 * time before proceeding with possibly vendor specific transactions
-	 */
-	msleep(post_oui_delay);
 
 	for (i = 0; i < read_dpcd_retry_cnt; i++) {
+		/*
+		 * Sink may need to configure internals based on vendor, so allow some
+		 * time before proceeding with possibly vendor specific transactions
+		 */
+		msleep(post_oui_delay);
 		status = core_link_read_dpcd(
 				link,
 				DP_DPCD_REV,
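The shape of the retry change above, in a minimal hedged sketch: the 30ms post-OUI settle delay now runs before every DPCD read attempt rather than once up front, and the attempt budget grows from 3 to 20. read_rev() and its failure counter are hypothetical stand-ins for core_link_read_dpcd(), which this sketch does not call.

#include <stdio.h>
#include <unistd.h>

static int attempts_left = 5;       /* pretend the sink needs ~5 tries */

static int read_rev(void)           /* hypothetical stand-in, not DC API */
{
	return --attempts_left > 0 ? -1 : 0;
}

int main(void)
{
	const int retries = 20;         /* bumped from 3 by this fix */
	const int post_oui_delay_ms = 30;
	int status = -1;
	int i;

	for (i = 0; i < retries && status != 0; i++) {
		usleep(post_oui_delay_ms * 1000); /* settle before each attempt */
		status = read_rev();
	}
	printf("status=%d after %d attempts\n", status, i);
	return 0;
}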
@@ -360,13 +360,6 @@ static bool radeon_fence_is_signaled(struct dma_fence *f)
 	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
 		return true;
 
-	if (down_read_trylock(&rdev->exclusive_lock)) {
-		radeon_fence_process(rdev, ring);
-		up_read(&rdev->exclusive_lock);
-
-		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
-			return true;
-	}
 	return false;
 }