mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-04-05 01:14:43 -04:00
drm/msm: move wq handling to KMS code
The global workqueue is only used for vblanks inside KMS code. Move allocation / flushing / deallocation of it to msm_kms.c Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com> Patchwork: https://patchwork.freedesktop.org/patch/662573/ Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
This commit is contained in:
committed by
Rob Clark
parent
a452510aad
commit
a409b78fcd
@@ -980,7 +980,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
|
||||
return 0;
|
||||
}
|
||||
|
||||
queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
|
||||
queue_delayed_work(priv->kms->wq, &dpu_enc->delayed_off_work,
|
||||
msecs_to_jiffies(dpu_enc->idle_timeout));
|
||||
|
||||
trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
|
||||
|
||||
@@ -511,7 +511,7 @@ static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
|
||||
|
||||
if (pending & PENDING_CURSOR) {
|
||||
update_cursor(crtc);
|
||||
drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
|
||||
drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->kms->wq);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1196,7 +1196,7 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
|
||||
}
|
||||
|
||||
if (pending & PENDING_CURSOR)
|
||||
drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
|
||||
drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->kms->wq);
|
||||
}
|
||||
|
||||
static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
|
||||
|
||||
@@ -77,13 +77,6 @@ static int msm_drm_uninit(struct device *dev)
|
||||
drm_atomic_helper_shutdown(ddev);
|
||||
}
|
||||
|
||||
/* We must cancel and cleanup any pending vblank enable/disable
|
||||
* work before msm_irq_uninstall() to avoid work re-enabling an
|
||||
* irq after uninstall has disabled it.
|
||||
*/
|
||||
|
||||
flush_workqueue(priv->wq);
|
||||
|
||||
msm_gem_shrinker_cleanup(ddev);
|
||||
|
||||
msm_perf_debugfs_cleanup(priv);
|
||||
@@ -97,8 +90,6 @@ static int msm_drm_uninit(struct device *dev)
|
||||
ddev->dev_private = NULL;
|
||||
drm_dev_put(ddev);
|
||||
|
||||
destroy_workqueue(priv->wq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -119,12 +110,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
|
||||
ddev->dev_private = priv;
|
||||
priv->dev = ddev;
|
||||
|
||||
priv->wq = alloc_ordered_workqueue("msm", 0);
|
||||
if (!priv->wq) {
|
||||
ret = -ENOMEM;
|
||||
goto err_put_dev;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&priv->objects);
|
||||
mutex_init(&priv->obj_lock);
|
||||
|
||||
@@ -149,7 +134,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
|
||||
if (priv->kms_init) {
|
||||
ret = drmm_mode_config_init(ddev);
|
||||
if (ret)
|
||||
goto err_destroy_wq;
|
||||
goto err_put_dev;
|
||||
}
|
||||
|
||||
dma_set_max_seg_size(dev, UINT_MAX);
|
||||
@@ -157,7 +142,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
|
||||
/* Bind all our sub-components: */
|
||||
ret = component_bind_all(dev, ddev);
|
||||
if (ret)
|
||||
goto err_destroy_wq;
|
||||
goto err_put_dev;
|
||||
|
||||
ret = msm_gem_shrinker_init(ddev);
|
||||
if (ret)
|
||||
@@ -194,8 +179,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
|
||||
|
||||
return ret;
|
||||
|
||||
err_destroy_wq:
|
||||
destroy_workqueue(priv->wq);
|
||||
err_put_dev:
|
||||
drm_dev_put(ddev);
|
||||
|
||||
|
||||
@@ -175,8 +175,6 @@ struct msm_drm_private {
|
||||
struct mutex lock;
|
||||
} lru;
|
||||
|
||||
struct workqueue_struct *wq;
|
||||
|
||||
unsigned int num_crtcs;
|
||||
|
||||
struct msm_drm_thread event_thread[MAX_CRTCS];
|
||||
|
||||
@@ -137,7 +137,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
|
||||
vbl_work->enable = enable;
|
||||
vbl_work->priv = priv;
|
||||
|
||||
queue_work(priv->wq, &vbl_work->work);
|
||||
queue_work(priv->kms->wq, &vbl_work->work);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -227,6 +227,13 @@ void msm_drm_kms_uninit(struct device *dev)
|
||||
|
||||
BUG_ON(!kms);
|
||||
|
||||
/* We must cancel and cleanup any pending vblank enable/disable
|
||||
* work before msm_irq_uninstall() to avoid work re-enabling an
|
||||
* irq after uninstall has disabled it.
|
||||
*/
|
||||
|
||||
flush_workqueue(kms->wq);
|
||||
|
||||
/* clean up event worker threads */
|
||||
for (i = 0; i < priv->num_crtcs; i++) {
|
||||
if (priv->event_thread[i].worker)
|
||||
@@ -261,7 +268,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
|
||||
ret = priv->kms_init(ddev);
|
||||
if (ret) {
|
||||
DRM_DEV_ERROR(dev, "failed to load kms\n");
|
||||
return ret;
|
||||
goto err_msm_uninit;
|
||||
}
|
||||
|
||||
/* Enable normalization of plane zpos */
|
||||
|
||||
@@ -153,6 +153,8 @@ struct msm_kms {
|
||||
struct mutex commit_lock[MAX_CRTCS];
|
||||
unsigned pending_crtc_mask;
|
||||
struct msm_pending_timer pending_timers[MAX_CRTCS];
|
||||
|
||||
struct workqueue_struct *wq;
|
||||
};
|
||||
|
||||
static inline int msm_kms_init(struct msm_kms *kms,
|
||||
@@ -165,6 +167,10 @@ static inline int msm_kms_init(struct msm_kms *kms,
|
||||
|
||||
kms->funcs = funcs;
|
||||
|
||||
kms->wq = alloc_ordered_workqueue("msm", 0);
|
||||
if (!kms->wq)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) {
|
||||
ret = msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
|
||||
if (ret) {
|
||||
@@ -181,6 +187,8 @@ static inline void msm_kms_destroy(struct msm_kms *kms)
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
|
||||
msm_atomic_destroy_pending_timer(&kms->pending_timers[i]);
|
||||
|
||||
destroy_workqueue(kms->wq);
|
||||
}
|
||||
|
||||
#define for_each_crtc_mask(dev, crtc, crtc_mask) \
|
||||
|
||||
Reference in New Issue
Block a user