Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-12-27 12:21:22 -05:00)
Merge tag 'drm-fixes-2025-11-21' of https://gitlab.freedesktop.org/drm/kernel
Pull drm fixes from Dave Airlie:
 "A range of small fixes across the board, the i915 display
  disambiguation is probably the biggest, otherwise amdgpu and xe as
  usual, with tegra, nouveau, radeon and a core atomic fix. Looks
  mostly normal.

  atomic:
   - Return error codes on failed blob creation for planes

  nouveau:
   - Fix memory leak

  tegra:
   - Fix device ref counting
   - Fix pid ref counting
   - Revert booting on Pixel C

  xe:
   - Fix out-of-bounds access with BIT()
   - Fix kunit test checking wrong condition
   - Drop duplicate kconfig select
   - Fix guc2host irq handler with MSI-X

  i915:
   - Wildcat Lake and Panther Lake detangled for display fixes

  amdgpu:
   - DTBCLK gating fix
   - EDID fetching retry improvements
   - HDMI HPD debounce filtering
   - DCN 2.0 cursor fix
   - DP MST PBN fix
   - VPE fix
   - GC 11 fix
   - PRT fix
   - MMIO remap page fix
   - SR-IOV fix

  radeon:
   - Fence deadlock fix"

* tag 'drm-fixes-2025-11-21' of https://gitlab.freedesktop.org/drm/kernel: (25 commits)
  drm/amdgpu: Add sriov vf check for VCN per queue reset support.
  drm/amdgpu/ttm: Fix crash when handling MMIO_REMAP in PDE flags
  drm/amdgpu/vm: Check PRT uAPI flag instead of PTE flag
  drm/amdgpu: Skip emit de meta data on gfx11 with rs64 enabled
  drm/amd: Skip power ungate during suspend for VPE
  drm/plane: Fix create_in_format_blob() return value
  drm/xe/irq: Handle msix vector0 interrupt
  drm/xe: Remove duplicate DRM_EXEC selection from Kconfig
  drm/xe/kunit: Fix forcewake assertion in mocs test
  drm/xe: Prevent BIT() overflow when handling invalid prefetch region
  drm/radeon: delete radeon_fence_process in is_signaled, no deadlock
  drm/amd/display: Fix pbn to kbps Conversion
  drm/amd/display: Clear the CUR_ENABLE register on DCN20 on DPP5
  drm/amd/display: Add an HPD filter for HDMI
  drm/amd/display: Increase DPCD read retries
  drm/amd/display: Move sleep into each retry for retrieve_link_cap()
  drm/amd/display: Prevent Gating DTBCLK before It Is Properly Latched
  drm/i915/xe3: Restrict PTL intel_encoder_is_c10phy() to only PHY A
  drm/i915/display: Add definition for wcl as subplatform
  drm/pcids: Split PTL pciids group to make wcl subplatform
  ...
@@ -3414,10 +3414,11 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
         (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
          adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
             continue;
-        /* skip CG for VCE/UVD, it's handled specially */
+        /* skip CG for VCE/UVD/VPE, it's handled specially */
         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
             adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
             adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+            adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
             adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
             adev->ip_blocks[i].version->funcs->set_powergating_state) {
             /* enable powergating to save power */
@@ -1372,7 +1372,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
         mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
         flags |= AMDGPU_PTE_SYSTEM;
 
-        if (ttm->caching == ttm_cached)
+        if (ttm && ttm->caching == ttm_cached)
             flags |= AMDGPU_PTE_SNOOPED;
     }
@@ -2078,7 +2078,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
         struct amdgpu_bo *bo = before->bo_va->base.bo;
 
         amdgpu_vm_it_insert(before, &vm->va);
-        if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
+        if (before->flags & AMDGPU_VM_PAGE_PRT)
             amdgpu_vm_prt_get(adev);
 
         if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
@@ -2093,7 +2093,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
         struct amdgpu_bo *bo = after->bo_va->base.bo;
 
         amdgpu_vm_it_insert(after, &vm->va);
-        if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
+        if (after->flags & AMDGPU_VM_PAGE_PRT)
             amdgpu_vm_prt_get(adev);
 
         if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
@@ -5872,9 +5872,9 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
         if (flags & AMDGPU_IB_PREEMPTED)
             control |= INDIRECT_BUFFER_PRE_RESUME(1);
 
-        if (vmid)
+        if (vmid && !ring->adev->gfx.rs64_enable)
             gfx_v11_0_ring_emit_de_meta(ring,
-                (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
+                !amdgpu_sriov_vf(ring->adev) && (flags & AMDGPU_IB_PREEMPTED));
     }
 
     amdgpu_ring_write(ring, header);
@@ -141,7 +141,7 @@ static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
     adev->vcn.supported_reset =
         amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
 
-    if (amdgpu_dpm_reset_vcn_is_supported(adev))
+    if (amdgpu_dpm_reset_vcn_is_supported(adev) && !amdgpu_sriov_vf(adev))
         adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
 
     return 0;
@@ -122,7 +122,9 @@ static int vcn_v5_0_1_late_init(struct amdgpu_ip_block *ip_block)
 
     switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
     case IP_VERSION(13, 0, 12):
-        if ((adev->psp.sos.fw_version >= 0x00450025) && amdgpu_dpm_reset_vcn_is_supported(adev))
+        if ((adev->psp.sos.fw_version >= 0x00450025) &&
+            amdgpu_dpm_reset_vcn_is_supported(adev) &&
+            !amdgpu_sriov_vf(adev))
             adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
         break;
     default:
@@ -3859,6 +3859,97 @@ void amdgpu_dm_update_connector_after_detect(
     update_subconnector_property(aconnector);
 }
 
+static bool are_sinks_equal(const struct dc_sink *sink1, const struct dc_sink *sink2)
+{
+    if (!sink1 || !sink2)
+        return false;
+    if (sink1->sink_signal != sink2->sink_signal)
+        return false;
+
+    if (sink1->dc_edid.length != sink2->dc_edid.length)
+        return false;
+
+    if (memcmp(sink1->dc_edid.raw_edid, sink2->dc_edid.raw_edid,
+               sink1->dc_edid.length) != 0)
+        return false;
+    return true;
+}
+
+/**
+ * DOC: hdmi_hpd_debounce_work
+ *
+ * HDMI HPD debounce delay in milliseconds. When an HDMI display toggles HPD
+ * (such as during power save transitions), this delay determines how long to
+ * wait before processing the HPD event. This allows distinguishing between a
+ * physical unplug (>hdmi_hpd_debounce_delay)
+ * and a spontaneous RX HPD toggle (<hdmi_hpd_debounce_delay).
+ *
+ * If the toggle is less than this delay, the driver compares sink capabilities
+ * and permits a hotplug event if they changed.
+ *
+ * The default value of 1500ms was chosen based on experimental testing with
+ * various monitors that exhibit spontaneous HPD toggling behavior.
+ */
+static void hdmi_hpd_debounce_work(struct work_struct *work)
+{
+    struct amdgpu_dm_connector *aconnector =
+        container_of(to_delayed_work(work), struct amdgpu_dm_connector,
+                     hdmi_hpd_debounce_work);
+    struct drm_connector *connector = &aconnector->base;
+    struct drm_device *dev = connector->dev;
+    struct amdgpu_device *adev = drm_to_adev(dev);
+    struct dc *dc = aconnector->dc_link->ctx->dc;
+    bool fake_reconnect = false;
+    bool reallow_idle = false;
+    bool ret = false;
+    guard(mutex)(&aconnector->hpd_lock);
+
+    /* Re-detect the display */
+    scoped_guard(mutex, &adev->dm.dc_lock) {
+        if (dc->caps.ips_support && dc->ctx->dmub_srv->idle_allowed) {
+            dc_allow_idle_optimizations(dc, false);
+            reallow_idle = true;
+        }
+        ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+    }
+
+    if (ret) {
+        /* Apply workaround delay for certain panels */
+        apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
+        /* Compare sinks to determine if this was a spontaneous HPD toggle */
+        if (are_sinks_equal(aconnector->dc_link->local_sink, aconnector->hdmi_prev_sink)) {
+            /*
+             * Sinks match - this was a spontaneous HDMI HPD toggle.
+             */
+            drm_dbg_kms(dev, "HDMI HPD: Sink unchanged after debounce, internal re-enable\n");
+            fake_reconnect = true;
+        }
+
+        /* Update connector state */
+        amdgpu_dm_update_connector_after_detect(aconnector);
+
+        drm_modeset_lock_all(dev);
+        dm_restore_drm_connector_state(dev, connector);
+        drm_modeset_unlock_all(dev);
+
+        /* Only notify OS if sink actually changed */
+        if (!fake_reconnect && aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+            drm_kms_helper_hotplug_event(dev);
+    }
+
+    /* Release the cached sink reference */
+    if (aconnector->hdmi_prev_sink) {
+        dc_sink_release(aconnector->hdmi_prev_sink);
+        aconnector->hdmi_prev_sink = NULL;
+    }
+
+    scoped_guard(mutex, &adev->dm.dc_lock) {
+        if (reallow_idle && dc->caps.ips_support)
+            dc_allow_idle_optimizations(dc, true);
+    }
+}
+
 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 {
     struct drm_connector *connector = &aconnector->base;
@@ -3868,6 +3959,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
     struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
     struct dc *dc = aconnector->dc_link->ctx->dc;
     bool ret = false;
+    bool debounce_required = false;
 
     if (adev->dm.disable_hpd_irq)
         return;
@@ -3890,6 +3982,14 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
     if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
         drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
 
+    /*
+     * Check for HDMI disconnect with debounce enabled.
+     */
+    debounce_required = (aconnector->hdmi_hpd_debounce_delay_ms > 0 &&
+                         dc_is_hdmi_signal(aconnector->dc_link->connector_signal) &&
+                         new_connection_type == dc_connection_none &&
+                         aconnector->dc_link->local_sink != NULL);
+
     if (aconnector->base.force && new_connection_type == dc_connection_none) {
         emulated_link_detect(aconnector->dc_link);
 
@@ -3899,7 +3999,34 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 
         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
             drm_kms_helper_connector_hotplug_event(connector);
+    } else if (debounce_required) {
+        /*
+         * HDMI disconnect detected - schedule delayed work instead of
+         * processing immediately. This allows us to coalesce spurious
+         * HDMI signals from physical unplugs.
+         */
+        drm_dbg_kms(dev, "HDMI HPD: Disconnect detected, scheduling debounce work (%u ms)\n",
+                    aconnector->hdmi_hpd_debounce_delay_ms);
+
+        /* Cache the current sink for later comparison */
+        if (aconnector->hdmi_prev_sink)
+            dc_sink_release(aconnector->hdmi_prev_sink);
+        aconnector->hdmi_prev_sink = aconnector->dc_link->local_sink;
+        if (aconnector->hdmi_prev_sink)
+            dc_sink_retain(aconnector->hdmi_prev_sink);
+
+        /* Schedule delayed detection. */
+        if (mod_delayed_work(system_wq,
+                             &aconnector->hdmi_hpd_debounce_work,
+                             msecs_to_jiffies(aconnector->hdmi_hpd_debounce_delay_ms)))
+            drm_dbg_kms(dev, "HDMI HPD: Re-scheduled debounce work\n");
+
     } else {
+
+        /* If the aconnector->hdmi_hpd_debounce_work is scheduled, exit early */
+        if (delayed_work_pending(&aconnector->hdmi_hpd_debounce_work))
+            return;
+
         scoped_guard(mutex, &adev->dm.dc_lock) {
             dc_exit_ips_for_hw_access(dc);
             ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
@@ -7388,6 +7515,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
     if (aconnector->mst_mgr.dev)
         drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
 
+    /* Cancel and flush any pending HDMI HPD debounce work */
+    cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work);
+    if (aconnector->hdmi_prev_sink) {
+        dc_sink_release(aconnector->hdmi_prev_sink);
+        aconnector->hdmi_prev_sink = NULL;
+    }
+
     if (aconnector->bl_idx != -1) {
         backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
         dm->backlight_dev[aconnector->bl_idx] = NULL;
@@ -8549,6 +8683,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
     mutex_init(&aconnector->hpd_lock);
     mutex_init(&aconnector->handle_mst_msg_ready);
 
+    aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS;
+    INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
+    aconnector->hdmi_prev_sink = NULL;
+
     /*
      * configure support HPD hot plug connector_>polled default value is 0
      * which means HPD hot plug not supported
@@ -59,6 +59,7 @@
 
 #define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)
 
+#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500
 /*
 #include "include/amdgpu_dal_power_if.h"
 #include "amdgpu_dm_irq.h"
@@ -819,6 +820,11 @@ struct amdgpu_dm_connector {
     bool pack_sdp_v1_3;
     enum adaptive_sync_type as_type;
     struct amdgpu_hdmi_vsdb_info vsdb_info;
+
+    /* HDMI HPD debounce support */
+    unsigned int hdmi_hpd_debounce_delay_ms;
+    struct delayed_work hdmi_hpd_debounce_work;
+    struct dc_sink *hdmi_prev_sink;
 };
 
 static inline void amdgpu_dm_set_mst_status(uint8_t *status,
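As a rough illustration of the debounce scheme used by the hunks above (not the amdgpu code itself), the sketch below shows the generic delayed-work pattern: every HPD toggle re-arms a timer, the handler only runs once the line has been quiet for the debounce interval, and teardown cancels the work synchronously. The `my_conn` structure, `my_hpd_*` functions and `MY_HPD_DEBOUNCE_MS` value are hypothetical stand-ins; only the workqueue APIs are real kernel interfaces.

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    #define MY_HPD_DEBOUNCE_MS 1500 /* same order of magnitude as the patch's default */

    struct my_conn {
        struct delayed_work hpd_work; /* debounced HPD handling */
    };

    static void my_hpd_work(struct work_struct *work)
    {
        struct my_conn *conn = container_of(to_delayed_work(work),
                                            struct my_conn, hpd_work);
        /* Runs only after the link has been quiet for MY_HPD_DEBOUNCE_MS. */
        (void)conn;
    }

    static void my_conn_init(struct my_conn *conn)
    {
        INIT_DELAYED_WORK(&conn->hpd_work, my_hpd_work);
    }

    static void my_hpd_event(struct my_conn *conn)
    {
        /* Each new toggle pushes the deadline out again. */
        mod_delayed_work(system_wq, &conn->hpd_work,
                         msecs_to_jiffies(MY_HPD_DEBOUNCE_MS));
    }

    static void my_conn_fini(struct my_conn *conn)
    {
        /* Ensure no work runs after teardown, as the connector_destroy hunk does. */
        cancel_delayed_work_sync(&conn->hpd_work);
    }

The real driver adds one more step the sketch omits: before firing the debounced detection it caches the previous sink and compares EDIDs, so a spontaneous toggle with an unchanged sink does not generate a userspace hotplug event.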
@@ -884,26 +884,28 @@ struct dsc_mst_fairness_params {
 };
 
 #if defined(CONFIG_DRM_AMD_DC_FP)
-static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn)
 {
-    u8 link_coding_cap;
-    uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+    uint64_t effective_kbps = (uint64_t)kbps;
 
-    link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
-    if (link_coding_cap == DP_128b_132b_ENCODING)
-        fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+    if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps
+        effective_kbps *= 1006;
+        effective_kbps = div_u64(effective_kbps, 1000);
+    }
 
-    return fec_overhead_multiplier_x1000;
+    return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000));
 }
 
-static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
+static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin)
 {
-    u64 peak_kbps = kbps;
+    uint64_t pbn_effective = (uint64_t)pbn;
 
-    peak_kbps *= 1006;
-    peak_kbps *= fec_overhead_multiplier_x1000;
-    peak_kbps = div_u64(peak_kbps, 1000 * 1000);
-    return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
+    if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn
+        pbn_effective *= (1000000 / PEAK_FACTOR_X1000);
+    else
+        pbn_effective *= 1000;
+
+    return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64);
 }
 
 static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
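For reference, the two helpers above implement the DP MST convention that one PBN unit represents 54/64 MBps of payload bandwidth, with an optional 0.6% margin applied on the kbps side. A self-contained sketch of the same arithmetic, useful for sanity-checking the round-trip (the example rate is made up and the helper names are not the driver's), could look like this:

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP_U64(n, d) (((n) + (d) - 1) / (d))

    /* kbps -> PBN, optionally adding the 0.6% (1006/1000) peak margin. */
    static uint64_t sketch_kbps_to_pbn(uint64_t kbps, int peak)
    {
        if (peak)
            kbps = kbps * 1006 / 1000;
        return DIV_ROUND_UP_U64(kbps * 64, 54 * 8 * 1000);
    }

    /* PBN -> kbps, optionally deducting the same margin (994/1000). */
    static uint64_t sketch_pbn_to_kbps(uint64_t pbn, int with_margin)
    {
        uint64_t scaled = with_margin ? pbn * (1000000 / 1006) : pbn * 1000;

        return DIV_ROUND_UP_U64(scaled * 8 * 54, 64);
    }

    int main(void)
    {
        uint64_t kbps = 1620000; /* hypothetical stream rate, kbit/s */
        uint64_t pbn = sketch_kbps_to_pbn(kbps, 1);

        printf("pbn=%llu, back to kbps=%llu\n",
               (unsigned long long)pbn,
               (unsigned long long)sketch_pbn_to_kbps(pbn, 1));
        return 0;
    }

The rounding directions matter: converting kbps to PBN rounds up so the allocation is never undersized, and the old open-coded "994 * 8 * 54 / 64" expression that the later hunks delete is exactly the margin-adjusted reverse conversion now centralized in pbn_to_kbps().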
@@ -974,7 +976,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
     dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
     dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
 
-    kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
+    kbps = pbn_to_kbps(pbn, false);
     dc_dsc_compute_config(
         param.sink->ctx->dc->res_pool->dscs[0],
         &param.sink->dsc_caps.dsc_dec_caps,
@@ -1003,12 +1005,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
     int link_timeslots_used;
     int fair_pbn_alloc;
     int ret = 0;
-    uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
 
     for (i = 0; i < count; i++) {
         if (vars[i + k].dsc_enabled) {
             initial_slack[i] =
-                kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
+                kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn;
             bpp_increased[i] = false;
             remaining_to_increase += 1;
         } else {
@@ -1104,7 +1105,6 @@ static int try_disable_dsc(struct drm_atomic_state *state,
     int next_index;
     int remaining_to_try = 0;
     int ret;
-    uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
     int var_pbn;
 
     for (i = 0; i < count; i++) {
@@ -1137,7 +1137,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
 
         DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
         var_pbn = vars[next_index].pbn;
-        vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+        vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true);
         ret = drm_dp_atomic_find_time_slots(state,
                                             params[next_index].port->mgr,
                                             params[next_index].port,
@@ -1197,7 +1197,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
     int count = 0;
     int i, k, ret;
     bool debugfs_overwrite = false;
-    uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
     struct drm_connector_state *new_conn_state;
 
     memset(params, 0, sizeof(params));
@@ -1278,7 +1277,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
     DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
     for (i = 0; i < count; i++) {
         vars[i + k].aconnector = params[i].aconnector;
-        vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+        vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
         vars[i + k].dsc_enabled = false;
         vars[i + k].bpp_x16 = 0;
         ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1300,7 +1299,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
     DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
     for (i = 0; i < count; i++) {
         if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
-            vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
+            vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false);
             vars[i + k].dsc_enabled = true;
             vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
             ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1308,7 +1307,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
             if (ret < 0)
                 return ret;
         } else {
-            vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+            vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
             vars[i + k].dsc_enabled = false;
             vars[i + k].bpp_x16 = 0;
             ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1763,18 +1762,6 @@ int pre_validate_dsc(struct drm_atomic_state *state,
     return ret;
 }
 
-static uint32_t kbps_from_pbn(unsigned int pbn)
-{
-    uint64_t kbps = (uint64_t)pbn;
-
-    kbps *= (1000000 / PEAK_FACTOR_X1000);
-    kbps *= 8;
-    kbps *= 54;
-    kbps /= 64;
-
-    return (uint32_t)kbps;
-}
-
 static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
                                           struct dc_dsc_bw_range *bw_range)
 {
@@ -1873,7 +1860,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
                                            dc_link_get_highest_encoding_format(stream->link));
     cur_link_settings = stream->link->verified_link_cap;
     root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
-    virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
+    virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true);
 
     /* pick the end to end bw bottleneck */
     end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
@@ -1926,7 +1913,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
         immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;
 
         if (immediate_upstream_port) {
-            virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
+            virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true);
             virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
         } else {
             /* For topology LCT 1 case - only one mstb*/
@@ -394,6 +394,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
     display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
     if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
         new_clocks->ref_dtbclk_khz = 600000;
+    else if (!new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz > 590000)
+        new_clocks->ref_dtbclk_khz = 0;
 
     /*
      * if it is safe to lower, but we are already in the lower state, we don't have to do anything
@@ -435,7 +437,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
 
         actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT);
 
-        if (actual_dtbclk) {
+        if (actual_dtbclk > 590000) {
             clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
             clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
         }
@@ -1411,7 +1411,7 @@ static void dccg35_set_dtbclk_dto(
                   __func__, params->otg_inst, params->pixclk_khz,
                   params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo);
 
-    } else {
+    } else if (!params->ref_dtbclk_khz && !req_dtbclk_khz) {
         switch (params->otg_inst) {
         case 0:
             REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0);
@@ -614,6 +614,14 @@ void dcn20_dpp_pg_control(
          *      DOMAIN11_PGFSM_PWR_STATUS, pwr_status,
          *      1, 1000);
          */
+
+        /* Force disable cursor on plane powerdown on DPP 5 using dpp_force_disable_cursor */
+        if (!power_on) {
+            struct dpp *dpp5 = hws->ctx->dc->res_pool->dpps[dpp_inst];
+            if (dpp5 && dpp5->funcs->dpp_force_disable_cursor)
+                dpp5->funcs->dpp_force_disable_cursor(dpp5);
+        }
+
         break;
     default:
         BREAK_TO_DEBUGGER();
@@ -1691,7 +1691,7 @@ static bool retrieve_link_cap(struct dc_link *link)
     union edp_configuration_cap edp_config_cap;
     union dp_downstream_port_present ds_port = { 0 };
     enum dc_status status = DC_ERROR_UNEXPECTED;
-    uint32_t read_dpcd_retry_cnt = 3;
+    uint32_t read_dpcd_retry_cnt = 20;
     int i;
     struct dp_sink_hw_fw_revision dp_hw_fw_revision;
     const uint32_t post_oui_delay = 30; // 30ms
@@ -1734,12 +1734,13 @@ static bool retrieve_link_cap(struct dc_link *link)
     }
 
     dpcd_set_source_specific_data(link);
-    /* Sink may need to configure internals based on vendor, so allow some
-     * time before proceeding with possibly vendor specific transactions
-     */
-    msleep(post_oui_delay);
 
     for (i = 0; i < read_dpcd_retry_cnt; i++) {
+        /*
+         * Sink may need to configure internals based on vendor, so allow some
+         * time before proceeding with possibly vendor specific transactions
+         */
+        msleep(post_oui_delay);
         status = core_link_read_dpcd(
                 link,
                 DP_DPCD_REV,
@@ -210,7 +210,7 @@ static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,
     formats_size = sizeof(__u32) * plane->format_count;
     if (WARN_ON(!formats_size)) {
         /* 0 formats are never expected */
-        return 0;
+        return ERR_PTR(-EINVAL);
     }
 
     modifiers_size =
@@ -226,7 +226,7 @@ static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,
 
     blob = drm_property_create_blob(dev, blob_size, NULL);
     if (IS_ERR(blob))
-        return NULL;
+        return blob;
 
     blob_data = blob->data;
     blob_data->version = FORMAT_BLOB_CURRENT;
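Both hunks above move create_in_format_blob() onto the usual kernel pointer-error convention: a failure is reported as an ERR_PTR()-encoded pointer rather than 0 or NULL, and the already-encoded error from drm_property_create_blob() is passed straight through so callers can act on the real errno. A generic sketch of that convention, with hypothetical names rather than the DRM code, might read:

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct foo {
        size_t len;
        u8 data[];
    };

    /* Allocate something, reporting failure as an encoded error pointer. */
    static struct foo *foo_create(size_t len)
    {
        struct foo *f;

        if (!len)
            return ERR_PTR(-EINVAL); /* bad argument: encoded error, not NULL */

        f = kzalloc(sizeof(*f) + len, GFP_KERNEL);
        if (!f)
            return ERR_PTR(-ENOMEM);

        f->len = len;
        return f;
    }

    /* Callers test with IS_ERR()/PTR_ERR() instead of comparing against NULL. */
    static int foo_use(size_t len)
    {
        struct foo *f = foo_create(len);

        if (IS_ERR(f))
            return PTR_ERR(f); /* propagate the encoded errno unchanged */

        kfree(f);
        return 0;
    }

Returning 0 from a pointer-returning function, as the old code did, silently becomes NULL and loses the error code, which is exactly what the fix in this pull avoids.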
@@ -39,14 +39,12 @@ bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
     struct intel_display *display = to_intel_display(encoder);
     enum phy phy = intel_encoder_to_phy(encoder);
 
-    /* PTL doesn't have a PHY connected to PORT B; as such,
-     * there will never be a case where PTL uses PHY B.
-     * WCL uses PORT A and B with the C10 PHY.
-     * Reusing the condition for WCL and extending it for PORT B
-     * should not cause any issues for PTL.
-     */
-    if (display->platform.pantherlake && phy < PHY_C)
-        return true;
+    if (display->platform.pantherlake) {
+        if (display->platform.pantherlake_wildcatlake)
+            return phy <= PHY_B;
+        else
+            return phy == PHY_A;
+    }
 
     if ((display->platform.lunarlake || display->platform.meteorlake) && phy < PHY_C)
         return true;
@@ -1404,8 +1404,20 @@ static const struct platform_desc bmg_desc = {
     PLATFORM_GROUP(dgfx),
 };
 
+static const u16 wcl_ids[] = {
+    INTEL_WCL_IDS(ID),
+    0
+};
+
 static const struct platform_desc ptl_desc = {
     PLATFORM(pantherlake),
+    .subplatforms = (const struct subplatform_desc[]) {
+        {
+            SUBPLATFORM(pantherlake, wildcatlake),
+            .pciidlist = wcl_ids,
+        },
+        {},
+    }
 };
 
 __diag_pop();
@@ -1482,6 +1494,7 @@ static const struct {
     INTEL_LNL_IDS(INTEL_DISPLAY_DEVICE, &lnl_desc),
     INTEL_BMG_IDS(INTEL_DISPLAY_DEVICE, &bmg_desc),
     INTEL_PTL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
+    INTEL_WCL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
 };
 
 static const struct {
@@ -101,7 +101,9 @@ struct pci_dev;
     /* Display ver 14.1 (based on GMD ID) */ \
     func(battlemage) \
     /* Display ver 30 (based on GMD ID) */ \
-    func(pantherlake)
+    func(pantherlake) \
+    func(pantherlake_wildcatlake)
 
 
 #define __MEMBER(name) unsigned long name:1;
 #define __COUNT(x) 1 +
@@ -127,6 +127,9 @@ static bool dmc_firmware_param_disabled(struct intel_display *display)
 #define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
 #define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
 
+#define XE3LPD_3002_DMC_PATH DMC_PATH(xe3lpd_3002)
+MODULE_FIRMWARE(XE3LPD_3002_DMC_PATH);
+
 #define XE3LPD_DMC_PATH DMC_PATH(xe3lpd)
 MODULE_FIRMWARE(XE3LPD_DMC_PATH);
@@ -183,9 +186,10 @@ static const char *dmc_firmware_default(struct intel_display *display, u32 *size
 {
     const char *fw_path = NULL;
     u32 max_fw_size = 0;
 
-    if (DISPLAY_VERx100(display) == 3002 ||
-        DISPLAY_VERx100(display) == 3000) {
+    if (DISPLAY_VERx100(display) == 3002) {
+        fw_path = XE3LPD_3002_DMC_PATH;
+        max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
+    } else if (DISPLAY_VERx100(display) == 3000) {
         fw_path = XE3LPD_DMC_PATH;
         max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
     } else if (DISPLAY_VERx100(display) == 2000) {
@@ -159,6 +159,8 @@ nvkm_falcon_fw_dtor(struct nvkm_falcon_fw *fw)
     nvkm_memory_unref(&fw->inst);
     nvkm_falcon_fw_dtor_sigs(fw);
     nvkm_firmware_dtor(&fw->fw);
+    kfree(fw->boot);
+    fw->boot = NULL;
 }
 
 static const struct nvkm_firmware_func
@@ -360,13 +360,6 @@ static bool radeon_fence_is_signaled(struct dma_fence *f)
     if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
         return true;
 
-    if (down_read_trylock(&rdev->exclusive_lock)) {
-        radeon_fence_process(rdev, ring);
-        up_read(&rdev->exclusive_lock);
-
-        if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
-            return true;
-    }
     return false;
 }
 
@@ -3148,6 +3148,7 @@ static int tegra_dc_couple(struct tegra_dc *dc)
         dc->client.parent = &parent->client;
 
         dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
+        put_device(companion);
     }
 
     return 0;
@@ -913,15 +913,6 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
     u32 value;
     int err;
 
-    /* If the bootloader enabled DSI it needs to be disabled
-     * in order for the panel initialization commands to be
-     * properly sent.
-     */
-    value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
-
-    if (value & DSI_POWER_CONTROL_ENABLE)
-        tegra_dsi_disable(dsi);
-
     err = tegra_dsi_prepare(dsi);
     if (err < 0) {
         dev_err(dsi->dev, "failed to prepare: %d\n", err);
@@ -114,9 +114,12 @@ int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_
     if (err)
         goto put_channel;
 
-    if (supported)
+    if (supported) {
+        struct pid *pid = get_task_pid(current, PIDTYPE_TGID);
         context->memory_context = host1x_memory_context_alloc(
-            host, client->base.dev, get_task_pid(current, PIDTYPE_TGID));
+            host, client->base.dev, pid);
+        put_pid(pid);
+    }
 
     if (IS_ERR(context->memory_context)) {
         if (PTR_ERR(context->memory_context) != -EOPNOTSUPP) {
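The Tegra fix above keeps the struct pid reference counts balanced: the reference taken by get_task_pid() only needs to live across the host1x_memory_context_alloc() call, so it is dropped with put_pid() afterwards instead of being leaked in the argument list. A minimal sketch of that take/use/drop pattern, with a hypothetical consumer standing in for the host1x call, is:

    #include <linux/pid.h>
    #include <linux/sched.h>

    /* Hypothetical consumer that only needs the pid for the duration of the call. */
    static void my_consume_pid(struct pid *pid)
    {
        /* e.g. record pid_nr(pid), or take its own reference if it keeps it */
        (void)pid;
    }

    static void my_do_something_with_current(void)
    {
        struct pid *pid = get_task_pid(current, PIDTYPE_TGID); /* +1 reference */

        my_consume_pid(pid);

        put_pid(pid); /* -1 reference: balances the get_task_pid() above */
    }

Calling get_task_pid() directly inside an argument list, as the removed line did, leaves nobody holding the returned reference, so the pid can never be released.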
@@ -13,7 +13,6 @@ config DRM_XE
     select TMPFS
     select DRM_BUDDY
     select DRM_CLIENT_SELECTION
-    select DRM_EXEC
     select DRM_KMS_HELPER
     select DRM_KUNIT_TEST_HELPERS if DRM_XE_KUNIT_TEST != n
     select DRM_PANEL
@@ -49,7 +49,7 @@ static void read_l3cc_table(struct xe_gt *gt,
     fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
     if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
         xe_force_wake_put(gt_to_fw(gt), fw_ref);
-        KUNIT_ASSERT_TRUE_MSG(test, true, "Forcewake Failed.\n");
+        KUNIT_FAIL_AND_ABORT(test, "Forcewake Failed.\n");
     }
 
     for (i = 0; i < info->num_mocs_regs; i++) {
@@ -847,22 +847,6 @@ static int xe_irq_msix_init(struct xe_device *xe)
     return 0;
 }
 
-static irqreturn_t guc2host_irq_handler(int irq, void *arg)
-{
-    struct xe_device *xe = arg;
-    struct xe_tile *tile;
-    u8 id;
-
-    if (!atomic_read(&xe->irq.enabled))
-        return IRQ_NONE;
-
-    for_each_tile(tile, xe, id)
-        xe_guc_irq_handler(&tile->primary_gt->uc.guc,
-                           GUC_INTR_GUC2HOST);
-
-    return IRQ_HANDLED;
-}
-
 static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
 {
     unsigned int tile_id, gt_id;
@@ -979,7 +963,7 @@ int xe_irq_msix_request_irqs(struct xe_device *xe)
     u16 msix;
 
     msix = GUC2HOST_MSIX;
-    err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe,
+    err = xe_irq_msix_request_irq(xe, xe_irq_handler(xe), xe,
                                   DRIVER_NAME "-guc2host", false, &msix);
     if (err)
         return err;
@@ -375,6 +375,7 @@ static const struct pci_device_id pciidlist[] = {
     INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
     INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
     INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
+    INTEL_WCL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
     { }
 };
 MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -3369,8 +3369,10 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
                      op == DRM_XE_VM_BIND_OP_PREFETCH) ||
         XE_IOCTL_DBG(xe, prefetch_region &&
                      op != DRM_XE_VM_BIND_OP_PREFETCH) ||
-        XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
-                          !(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
+        XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
+                          /* Guard against undefined shift in BIT(prefetch_region) */
+                          (prefetch_region >= (sizeof(xe->info.mem_region_mask) * 8) ||
+                           !(BIT(prefetch_region) & xe->info.mem_region_mask)))) ||
         XE_IOCTL_DBG(xe, obj &&
                      op == DRM_XE_VM_BIND_OP_UNMAP) ||
         XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) &&
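The extra bound check above matters because BIT(n) expands to 1UL << n, and shifting by a count greater than or equal to the width of the type is undefined behaviour in C, so an unchecked, userspace-supplied region index could not be tested against the mask safely. A standalone sketch of the same guard pattern (illustrative only, not the xe ioctl code) is:

    #include <stdbool.h>
    #include <stdint.h>

    #define BIT(n) (1UL << (n))

    /* Return true only when 'region' is a valid index into 'mask'. */
    static bool region_in_mask(unsigned long mask, uint32_t region)
    {
        /* Reject out-of-range indexes first: BIT(region) would be UB here. */
        if (region >= sizeof(mask) * 8)
            return false;

        return (BIT(region) & mask) != 0;
    }

Checking the index before forming the bit is the whole fix; once the shift amount is known to be in range, the mask test itself is well defined.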
@@ -877,7 +877,10 @@
     MACRO__(0xB08F, ## __VA_ARGS__), \
     MACRO__(0xB090, ## __VA_ARGS__), \
     MACRO__(0xB0A0, ## __VA_ARGS__), \
-    MACRO__(0xB0B0, ## __VA_ARGS__), \
+    MACRO__(0xB0B0, ## __VA_ARGS__)
 
+/* WCL */
+#define INTEL_WCL_IDS(MACRO__, ...) \
+    MACRO__(0xFD80, ## __VA_ARGS__), \
+    MACRO__(0xFD81, ## __VA_ARGS__)