mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-09 13:43:21 -04:00
drm/i915/dmc_wl: Use non-sleeping variant of MMIO wait
Some display MMIO transactions for offsets in the range that requires
the DMC wakelock happen in atomic context (this has been confirmed
during tests on PTL). That means that we need to use a non-sleeping
variant of MMIO waiting function.
Implement __intel_de_wait_for_register_atomic_nowl() and use it when
waiting for acknowledgment of acquire/release.
v2:
- No __intel_de_wait_for_register_atomic_nowl() wrapper to convert
i915 to display. (Jani)
- Add a quick explanation why DMC_WAKELOCK_CTL_TIMEOUT_US is defined
in microseconds. (Luca)
Cc: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: Luca Coelho <luciano.coelho@intel.com>
Signed-off-by: Gustavo Sousa <gustavo.sousa@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241108130218.24125-4-gustavo.sousa@intel.com
This commit: b2ba4632b9 (parent: 359d2cc1f3), committed by Matt Roper.
@@ -117,6 +117,16 @@ __intel_de_wait_for_register_nowl(struct intel_display *display,
			       value, timeout);
}
static inline int
|
||||
__intel_de_wait_for_register_atomic_nowl(struct intel_display *display,
|
||||
i915_reg_t reg,
|
||||
u32 mask, u32 value,
|
||||
unsigned int fast_timeout_us)
|
||||
{
|
||||
return __intel_wait_for_register(__to_uncore(display), reg, mask,
|
||||
value, fast_timeout_us, 0, NULL);
|
||||
}
static inline int
intel_de_wait(struct intel_display *display, i915_reg_t reg,
	      u32 mask, u32 value, unsigned int timeout)
@@ -39,7 +39,11 @@
  * potential future use.
  */

-#define DMC_WAKELOCK_CTL_TIMEOUT 5
+/*
+ * Define DMC_WAKELOCK_CTL_TIMEOUT_US in microseconds because we use the
+ * atomic variant of waiting MMIO.
+ */
+#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
 #define DMC_WAKELOCK_HOLD_TIME 50

 struct intel_dmc_wl_range {
@@ -78,9 +82,9 @@ static void intel_dmc_wl_work(struct work_struct *work)

 	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

-	if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
-					      DMC_WAKELOCK_CTL_ACK, 0,
-					      DMC_WAKELOCK_CTL_TIMEOUT)) {
+	if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
+						     DMC_WAKELOCK_CTL_ACK, 0,
+						     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
 		WARN_RATELIMIT(1, "DMC wakelock release timed out");
 		goto out_unlock;
 	}
@@ -217,10 +221,14 @@ void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
 	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
 			    DMC_WAKELOCK_CTL_REQ);

-	if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
-					      DMC_WAKELOCK_CTL_ACK,
-					      DMC_WAKELOCK_CTL_ACK,
-					      DMC_WAKELOCK_CTL_TIMEOUT)) {
+	/*
+	 * We need to use the atomic variant of the waiting routine
+	 * because the DMC wakelock is also taken in atomic context.
+	 */
+	if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
+						     DMC_WAKELOCK_CTL_ACK,
+						     DMC_WAKELOCK_CTL_ACK,
+						     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
 		WARN_RATELIMIT(1, "DMC wakelock ack timed out");
 		goto out_unlock;
 	}
Reference in New Issue
Block a user