diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c index 910ffb7051f4..420467a5325c 100644 --- a/drivers/accel/amdxdna/aie2_ctx.c +++ b/drivers/accel/amdxdna/aie2_ctx.c @@ -133,11 +133,20 @@ static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx) dma_fence_put(fence); } +static int aie2_hwctx_suspend_cb(struct amdxdna_hwctx *hwctx, void *arg) +{ + struct amdxdna_dev *xdna = hwctx->client->xdna; + + aie2_hwctx_wait_for_idle(hwctx); + aie2_hwctx_stop(xdna, hwctx, NULL); + aie2_hwctx_status_shift_stop(hwctx); + + return 0; +} + void aie2_hwctx_suspend(struct amdxdna_client *client) { struct amdxdna_dev *xdna = client->xdna; - struct amdxdna_hwctx *hwctx; - unsigned long hwctx_id; /* * Command timeout is unlikely. But if it happens, it doesn't @@ -145,19 +154,20 @@ void aie2_hwctx_suspend(struct amdxdna_client *client) * and abort all commands. */ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); - guard(mutex)(&client->hwctx_lock); - amdxdna_for_each_hwctx(client, hwctx_id, hwctx) { - aie2_hwctx_wait_for_idle(hwctx); - aie2_hwctx_stop(xdna, hwctx, NULL); - aie2_hwctx_status_shift_stop(hwctx); - } + amdxdna_hwctx_walk(client, NULL, aie2_hwctx_suspend_cb); } -void aie2_hwctx_resume(struct amdxdna_client *client) +static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg) +{ + struct amdxdna_dev *xdna = hwctx->client->xdna; + + aie2_hwctx_status_restore(hwctx); + return aie2_hwctx_restart(xdna, hwctx); +} + +int aie2_hwctx_resume(struct amdxdna_client *client) { struct amdxdna_dev *xdna = client->xdna; - struct amdxdna_hwctx *hwctx; - unsigned long hwctx_id; /* * The resume path cannot guarantee that mailbox channel can be @@ -165,11 +175,7 @@ void aie2_hwctx_resume(struct amdxdna_client *client) * mailbox channel, error will return. 
*/ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); - guard(mutex)(&client->hwctx_lock); - amdxdna_for_each_hwctx(client, hwctx_id, hwctx) { - aie2_hwctx_status_restore(hwctx); - aie2_hwctx_restart(xdna, hwctx); - } + return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb); } static void diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c index 82412eec9a4b..9caad083543d 100644 --- a/drivers/accel/amdxdna/aie2_message.c +++ b/drivers/accel/amdxdna/aie2_message.c @@ -290,18 +290,25 @@ int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u6 return 0; } +static int amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx, void *arg) +{ + u32 *bitmap = arg; + + *bitmap |= GENMASK(hwctx->start_col + hwctx->num_col - 1, hwctx->start_col); + + return 0; +} + int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled) { DECLARE_AIE2_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS); struct amdxdna_dev *xdna = ndev->xdna; struct amdxdna_client *client; - struct amdxdna_hwctx *hwctx; - unsigned long hwctx_id; dma_addr_t dma_addr; u32 aie_bitmap = 0; u8 *buff_addr; - int ret, idx; + int ret; buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr, DMA_FROM_DEVICE, GFP_KERNEL); @@ -309,12 +316,8 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, return -ENOMEM; /* Go through each hardware context and mark the AIE columns that are active */ - list_for_each_entry(client, &xdna->client_list, node) { - idx = srcu_read_lock(&client->hwctx_srcu); - amdxdna_for_each_hwctx(client, hwctx_id, hwctx) - aie_bitmap |= amdxdna_hwctx_col_map(hwctx); - srcu_read_unlock(&client->hwctx_srcu, idx); - } + list_for_each_entry(client, &xdna->client_list, node) + amdxdna_hwctx_walk(client, &aie_bitmap, amdxdna_hwctx_col_map); *cols_filled = 0; req.dump_buff_addr = dma_addr; diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c index 
6fc3191c3097..16ac0cab4f44 100644 --- a/drivers/accel/amdxdna/aie2_pci.c +++ b/drivers/accel/amdxdna/aie2_pci.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -465,8 +466,11 @@ static int aie2_hw_resume(struct amdxdna_dev *xdna) return ret; } - list_for_each_entry(client, &xdna->client_list, node) - aie2_hwctx_resume(client); + list_for_each_entry(client, &xdna->client_list, node) { + ret = aie2_hwctx_resume(client); + if (ret) + break; + } return ret; } @@ -779,65 +783,56 @@ static int aie2_get_clock_metadata(struct amdxdna_client *client, return ret; } -static int aie2_get_hwctx_status(struct amdxdna_client *client, - struct amdxdna_drm_get_info *args) +static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg) { - struct amdxdna_drm_query_hwctx __user *buf; - struct amdxdna_dev *xdna = client->xdna; - struct amdxdna_drm_query_hwctx *tmp; - struct amdxdna_client *tmp_client; - struct amdxdna_hwctx *hwctx; - unsigned long hwctx_id; - bool overflow = false; - u32 req_bytes = 0; - u32 hw_i = 0; - int ret = 0; - int idx; + struct amdxdna_drm_query_hwctx __user *buf, *tmp __free(kfree) = NULL; + struct amdxdna_drm_get_info *get_info_args = arg; - drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); + if (get_info_args->buffer_size < sizeof(*tmp)) + return -EINVAL; tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) return -ENOMEM; - buf = u64_to_user_ptr(args->buffer); + tmp->pid = hwctx->client->pid; + tmp->context_id = hwctx->id; + tmp->start_col = hwctx->start_col; + tmp->num_col = hwctx->num_col; + tmp->command_submissions = hwctx->priv->seq; + tmp->command_completions = hwctx->priv->completed; + + buf = u64_to_user_ptr(get_info_args->buffer); + + if (copy_to_user(buf, tmp, sizeof(*tmp))) + return -EFAULT; + + get_info_args->buffer += sizeof(*tmp); + get_info_args->buffer_size -= sizeof(*tmp); + + return 0; +} + +static int aie2_get_hwctx_status(struct amdxdna_client *client, + struct amdxdna_drm_get_info 
*args) +{ + struct amdxdna_dev *xdna = client->xdna; + struct amdxdna_drm_get_info info_args; + struct amdxdna_client *tmp_client; + int ret; + + drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); + + info_args.buffer = args->buffer; + info_args.buffer_size = args->buffer_size; + list_for_each_entry(tmp_client, &xdna->client_list, node) { - idx = srcu_read_lock(&tmp_client->hwctx_srcu); - amdxdna_for_each_hwctx(tmp_client, hwctx_id, hwctx) { - req_bytes += sizeof(*tmp); - if (args->buffer_size < req_bytes) { - /* Continue iterating to get the required size */ - overflow = true; - continue; - } - - memset(tmp, 0, sizeof(*tmp)); - tmp->pid = tmp_client->pid; - tmp->context_id = hwctx->id; - tmp->start_col = hwctx->start_col; - tmp->num_col = hwctx->num_col; - tmp->command_submissions = hwctx->priv->seq; - tmp->command_completions = hwctx->priv->completed; - - if (copy_to_user(&buf[hw_i], tmp, sizeof(*tmp))) { - ret = -EFAULT; - srcu_read_unlock(&tmp_client->hwctx_srcu, idx); - goto out; - } - hw_i++; - } - srcu_read_unlock(&tmp_client->hwctx_srcu, idx); + ret = amdxdna_hwctx_walk(tmp_client, &info_args, aie2_hwctx_status_cb); + if (ret) + break; } - if (overflow) { - XDNA_ERR(xdna, "Invalid buffer size. 
Given: %u Need: %u.", - args->buffer_size, req_bytes); - ret = -EINVAL; - } - -out: - kfree(tmp); - args->buffer_size = req_bytes; + args->buffer_size = (u32)(info_args.buffer - args->buffer); return ret; } diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h index 488d8ee568eb..91a8e948f82a 100644 --- a/drivers/accel/amdxdna/aie2_pci.h +++ b/drivers/accel/amdxdna/aie2_pci.h @@ -289,7 +289,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx); void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx); int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size); void aie2_hwctx_suspend(struct amdxdna_client *client); -void aie2_hwctx_resume(struct amdxdna_client *client); +int aie2_hwctx_resume(struct amdxdna_client *client); int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq); void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo, unsigned long cur_seq); diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c index b47a7f8e9017..4bfe4ef20550 100644 --- a/drivers/accel/amdxdna/amdxdna_ctx.c +++ b/drivers/accel/amdxdna/amdxdna_ctx.c @@ -68,14 +68,30 @@ static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx, synchronize_srcu(ss); /* At this point, user is not able to submit new commands */ - mutex_lock(&xdna->dev_lock); xdna->dev_info->ops->hwctx_fini(hwctx); - mutex_unlock(&xdna->dev_lock); kfree(hwctx->name); kfree(hwctx); } +int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg, + int (*walk)(struct amdxdna_hwctx *hwctx, void *arg)) +{ + struct amdxdna_hwctx *hwctx; + unsigned long hwctx_id; + int ret = 0, idx; + + idx = srcu_read_lock(&client->hwctx_srcu); + amdxdna_for_each_hwctx(client, hwctx_id, hwctx) { + ret = walk(hwctx, arg); + if (ret) + break; + } + srcu_read_unlock(&client->hwctx_srcu, idx); + + return ret; +} + void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size) { struct amdxdna_cmd *cmd = 
abo->mem.kva; @@ -126,16 +142,12 @@ void amdxdna_hwctx_remove_all(struct amdxdna_client *client) struct amdxdna_hwctx *hwctx; unsigned long hwctx_id; - mutex_lock(&client->hwctx_lock); amdxdna_for_each_hwctx(client, hwctx_id, hwctx) { XDNA_DBG(client->xdna, "PID %d close HW context %d", client->pid, hwctx->id); xa_erase(&client->hwctx_xa, hwctx->id); - mutex_unlock(&client->hwctx_lock); amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu); - mutex_lock(&client->hwctx_lock); } - mutex_unlock(&client->hwctx_lock); } int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) @@ -225,6 +237,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d if (!drm_dev_enter(dev, &idx)) return -ENODEV; + mutex_lock(&xdna->dev_lock); hwctx = xa_erase(&client->hwctx_xa, args->handle); if (!hwctx) { ret = -EINVAL; @@ -241,6 +254,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle); out: + mutex_unlock(&xdna->dev_lock); drm_dev_exit(idx); return ret; } diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h index c652229547a3..7cd7a55936f0 100644 --- a/drivers/accel/amdxdna/amdxdna_ctx.h +++ b/drivers/accel/amdxdna/amdxdna_ctx.h @@ -139,14 +139,10 @@ amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo) void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size); int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo); -static inline u32 amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx) -{ - return GENMASK(hwctx->start_col + hwctx->num_col - 1, - hwctx->start_col); -} - void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job); void amdxdna_hwctx_remove_all(struct amdxdna_client *client); +int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg, + int (*walk)(struct amdxdna_hwctx *hwctx, void *arg)); int amdxdna_cmd_submit(struct amdxdna_client *client, u32 
cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt, diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c index fbca94183f96..8ef5e4f27f5e 100644 --- a/drivers/accel/amdxdna/amdxdna_pci_drv.c +++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c @@ -81,7 +81,6 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp) ret = -ENODEV; goto unbind_sva; } - mutex_init(&client->hwctx_lock); init_srcu_struct(&client->hwctx_srcu); xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC); mutex_init(&client->mm_lock); @@ -116,7 +115,6 @@ static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp) xa_destroy(&client->hwctx_xa); cleanup_srcu_struct(&client->hwctx_srcu); - mutex_destroy(&client->hwctx_lock); mutex_destroy(&client->mm_lock); if (client->dev_heap) drm_gem_object_put(to_gobj(client->dev_heap)); @@ -142,8 +140,8 @@ static int amdxdna_flush(struct file *f, fl_owner_t id) mutex_lock(&xdna->dev_lock); list_del_init(&client->node); - mutex_unlock(&xdna->dev_lock); amdxdna_hwctx_remove_all(client); + mutex_unlock(&xdna->dev_lock); drm_dev_exit(idx); return 0; @@ -330,11 +328,8 @@ static void amdxdna_remove(struct pci_dev *pdev) struct amdxdna_client, node); while (client) { list_del_init(&client->node); - mutex_unlock(&xdna->dev_lock); - amdxdna_hwctx_remove_all(client); - mutex_lock(&xdna->dev_lock); client = list_first_entry_or_null(&xdna->client_list, struct amdxdna_client, node); } diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h index 40bbb3c06320..b6b3b424d1d5 100644 --- a/drivers/accel/amdxdna/amdxdna_pci_drv.h +++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h @@ -116,8 +116,6 @@ struct amdxdna_device_id { struct amdxdna_client { struct list_head node; pid_t pid; - struct mutex hwctx_lock; /* protect hwctx */ - /* do NOT wait this srcu when hwctx_lock is held */ struct srcu_struct hwctx_srcu; struct xarray hwctx_xa; u32 next_hwctxid; diff --git 
a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index c0ad8f59e483..609cdb9d371e 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -2604,6 +2604,7 @@ static int anx7625_link_bridge(struct drm_dp_aux *aux) platform->bridge.type = platform->pdata.panel_bridge ? DRM_MODE_CONNECTOR_eDP : DRM_MODE_CONNECTOR_DisplayPort; + platform->bridge.support_hdcp = true; drm_bridge_add(&platform->bridge); diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index d7e1c2f8f53c..e9f16dbc9535 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -373,7 +373,8 @@ static int display_connector_probe(struct platform_device *pdev) if (conn->bridge.ddc) conn->bridge.ops |= DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT; - if (conn->hpd_gpio) + /* Detecting the monitor requires reading DPCD */ + if (conn->hpd_gpio && type != DRM_MODE_CONNECTOR_DisplayPort) conn->bridge.ops |= DRM_BRIDGE_OP_DETECT; if (conn->hpd_irq >= 0) conn->bridge.ops |= DRM_BRIDGE_OP_HPD; diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c index 8c915427d053..091c5335355a 100644 --- a/drivers/gpu/drm/display/drm_bridge_connector.c +++ b/drivers/gpu/drm/display/drm_bridge_connector.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -641,6 +642,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, struct drm_bridge *bridge, *panel_bridge = NULL; unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB); unsigned int max_bpc = 8; + bool support_hdcp = false; int connector_type; int ret; @@ -763,6 +765,9 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (drm_bridge_is_panel(bridge)) panel_bridge = bridge; + + if (bridge->support_hdcp) + support_hdcp = true; } if (connector_type == 
DRM_MODE_CONNECTOR_Unknown) @@ -849,6 +854,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (panel_bridge) drm_panel_bridge_set_orientation(connector, panel_bridge); + if (support_hdcp && IS_REACHABLE(CONFIG_DRM_DISPLAY_HELPER) && + IS_ENABLED(CONFIG_DRM_DISPLAY_HDCP_HELPER)) + drm_connector_attach_content_protection_property(connector, true); + return connector; } EXPORT_SYMBOL_GPL(drm_bridge_connector_init); diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index 5bb4c77db2c3..647b49ff2da5 100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -270,6 +270,29 @@ npages_in_range(unsigned long start, unsigned long end) return (end - start) >> PAGE_SHIFT; } +/** + * drm_gpusvm_notifier_find() - Find GPU SVM notifier from GPU SVM + * @gpusvm: Pointer to the GPU SVM structure. + * @start: Start address of the notifier + * @end: End address of the notifier + * + * Return: A pointer to the drm_gpusvm_notifier if found or NULL + */ +struct drm_gpusvm_notifier * +drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start, + unsigned long end) +{ + struct interval_tree_node *itree; + + itree = interval_tree_iter_first(&gpusvm->root, start, end - 1); + + if (itree) + return container_of(itree, struct drm_gpusvm_notifier, itree); + else + return NULL; +} +EXPORT_SYMBOL_GPL(drm_gpusvm_notifier_find); + /** * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier * @notifier: Pointer to the GPU SVM notifier structure. 
@@ -293,86 +316,6 @@ drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start, } EXPORT_SYMBOL_GPL(drm_gpusvm_range_find); -/** - * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier - * @range__: Iterator variable for the ranges - * @next__: Iterator variable for the ranges temporay storage - * @notifier__: Pointer to the GPU SVM notifier - * @start__: Start address of the range - * @end__: End address of the range - * - * This macro is used to iterate over GPU SVM ranges in a notifier while - * removing ranges from it. - */ -#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \ - for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \ - (next__) = __drm_gpusvm_range_next(range__); \ - (range__) && (drm_gpusvm_range_start(range__) < (end__)); \ - (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__)) - -/** - * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list - * @notifier: a pointer to the current drm_gpusvm_notifier - * - * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if - * the current notifier is the last one or if the input notifier is - * NULL. 
- */ -static struct drm_gpusvm_notifier * -__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier) -{ - if (notifier && !list_is_last(¬ifier->entry, - ¬ifier->gpusvm->notifier_list)) - return list_next_entry(notifier, entry); - - return NULL; -} - -static struct drm_gpusvm_notifier * -notifier_iter_first(struct rb_root_cached *root, unsigned long start, - unsigned long last) -{ - struct interval_tree_node *itree; - - itree = interval_tree_iter_first(root, start, last); - - if (itree) - return container_of(itree, struct drm_gpusvm_notifier, itree); - else - return NULL; -} - -/** - * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm - * @notifier__: Iterator variable for the notifiers - * @notifier__: Pointer to the GPU SVM notifier - * @start__: Start address of the notifier - * @end__: End address of the notifier - * - * This macro is used to iterate over GPU SVM notifiers in a gpusvm. - */ -#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \ - for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1); \ - (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \ - (notifier__) = __drm_gpusvm_notifier_next(notifier__)) - -/** - * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm - * @notifier__: Iterator variable for the notifiers - * @next__: Iterator variable for the notifiers temporay storage - * @notifier__: Pointer to the GPU SVM notifier - * @start__: Start address of the notifier - * @end__: End address of the notifier - * - * This macro is used to iterate over GPU SVM notifiers in a gpusvm while - * removing notifiers from it. 
- */ -#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \ - for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1), \ - (next__) = __drm_gpusvm_notifier_next(notifier__); \ - (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \ - (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__)) - /** * drm_gpusvm_notifier_invalidate() - Invalidate a GPU SVM notifier. * @mni: Pointer to the mmu_interval_notifier structure. @@ -472,22 +415,6 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm, } EXPORT_SYMBOL_GPL(drm_gpusvm_init); -/** - * drm_gpusvm_notifier_find() - Find GPU SVM notifier - * @gpusvm: Pointer to the GPU SVM structure - * @fault_addr: Fault address - * - * This function finds the GPU SVM notifier associated with the fault address. - * - * Return: Pointer to the GPU SVM notifier on success, NULL otherwise. - */ -static struct drm_gpusvm_notifier * -drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, - unsigned long fault_addr) -{ - return notifier_iter_first(&gpusvm->root, fault_addr, fault_addr + 1); -} - /** * to_drm_gpusvm_notifier() - retrieve the container struct for a given rbtree node * @node: a pointer to the rbtree node embedded within a drm_gpusvm_notifier struct @@ -943,7 +870,7 @@ drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm, if (!mmget_not_zero(mm)) return ERR_PTR(-EFAULT); - notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr); + notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr, fault_addr + 1); if (!notifier) { notifier = drm_gpusvm_notifier_alloc(gpusvm, fault_addr); if (IS_ERR(notifier)) { @@ -1107,7 +1034,8 @@ void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm, drm_gpusvm_driver_lock_held(gpusvm); notifier = drm_gpusvm_notifier_find(gpusvm, - drm_gpusvm_range_start(range)); + drm_gpusvm_range_start(range), + drm_gpusvm_range_start(range) + 1); if (WARN_ON_ONCE(!notifier)) return; diff --git a/drivers/gpu/drm/drm_gpuvm.c 
b/drivers/gpu/drm/drm_gpuvm.c index bbc7fecb6f4a..d6bea8a4fffd 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -420,6 +420,71 @@ * new: |-----------|-----| (b.bo_offset=m,a.bo_offset=n+2) */ +/** + * DOC: Madvise Logic - Splitting and Traversal + * + * This logic handles GPU VA range updates by generating remap and map operations + * without performing unmaps or merging existing mappings. + * + * 1) The requested range lies entirely within a single drm_gpuva. The logic splits + * the existing mapping at the start and end boundaries and inserts a new map. + * + * :: + * a start end b + * pre: |-----------------------| + * drm_gpuva1 + * + * a start end b + * new: |-----|=========|-------| + * remap map remap + * + * one REMAP and one MAP : Same behaviour as SPLIT and MERGE + * + * 2) The requested range spans multiple drm_gpuva regions. The logic traverses + * across boundaries, remapping the start and end segments, and inserting two + * map operations to cover the full range. + * + * :: a start b c end d + * pre: |------------------|--------------|------------------| + * drm_gpuva1 drm_gpuva2 drm_gpuva3 + * + * a start b c end d + * new: |-------|==========|--------------|========|---------| + * remap1 map1 drm_gpuva2 map2 remap2 + * + * two REMAPS and two MAPS + * + * 3) Either start or end lies within a drm_gpuva. A single remap and map operation + * are generated to update the affected portion. 
+ * + * + * :: a/start b c end d + * pre: |------------------|--------------|------------------| + * drm_gpuva1 drm_gpuva2 drm_gpuva3 + * + * a/start b c end d + * new: |------------------|--------------|========|---------| + * drm_gpuva1 drm_gpuva2 map1 remap1 + * + * :: a start b c/end d + * pre: |------------------|--------------|------------------| + * drm_gpuva1 drm_gpuva2 drm_gpuva3 + * + * a start b c/end d + * new: |-------|==========|--------------|------------------| + * remap1 map1 drm_gpuva2 drm_gpuva3 + * + * one REMAP and one MAP + * + * 4) Both start and end align with existing drm_gpuva boundaries. No operations + * are needed as the range is already covered. + * + * 5) No existing drm_gpuvas. No operations. + * + * Unlike drm_gpuvm_sm_map_ops_create, this logic avoids unmaps and merging, + * focusing solely on remap and map operations for efficient traversal and update. + */ + /** * DOC: Locking * @@ -486,13 +551,18 @@ * u64 addr, u64 range, * struct drm_gem_object *obj, u64 offset) * { + * struct drm_gpuvm_map_req map_req = { + * .map.va.addr = addr, + * .map.va.range = range, + * .map.gem.obj = obj, + * .map.gem.offset = offset, + * }; * struct drm_gpuva_ops *ops; * struct drm_gpuva_op *op * struct drm_gpuvm_bo *vm_bo; * * driver_lock_va_space(); - * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, - * obj, offset); + * ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req); * if (IS_ERR(ops)) * return PTR_ERR(ops); * @@ -2054,16 +2124,18 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap); static int op_map_cb(const struct drm_gpuvm_ops *fn, void *priv, - u64 addr, u64 range, - struct drm_gem_object *obj, u64 offset) + const struct drm_gpuvm_map_req *req) { struct drm_gpuva_op op = {}; + if (!req) + return 0; + op.op = DRM_GPUVA_OP_MAP; - op.map.va.addr = addr; - op.map.va.range = range; - op.map.gem.obj = obj; - op.map.gem.offset = offset; + op.map.va.addr = req->map.va.addr; + op.map.va.range = req->map.va.range; + op.map.gem.obj = req->map.gem.obj; + 
op.map.gem.offset = req->map.gem.offset; return fn->sm_step_map(&op, priv); } @@ -2088,10 +2160,13 @@ op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv, static int op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv, - struct drm_gpuva *va, bool merge) + struct drm_gpuva *va, bool merge, bool madvise) { struct drm_gpuva_op op = {}; + if (madvise) + return 0; + op.op = DRM_GPUVA_OP_UNMAP; op.unmap.va = va; op.unmap.keep = merge; @@ -2102,10 +2177,15 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv, static int __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, const struct drm_gpuvm_ops *ops, void *priv, - u64 req_addr, u64 req_range, - struct drm_gem_object *req_obj, u64 req_offset) + const struct drm_gpuvm_map_req *req, + bool madvise) { + struct drm_gem_object *req_obj = req->map.gem.obj; + const struct drm_gpuvm_map_req *op_map = madvise ? NULL : req; struct drm_gpuva *va, *next; + u64 req_offset = req->map.gem.offset; + u64 req_range = req->map.va.range; + u64 req_addr = req->map.va.addr; u64 req_end = req_addr + req_range; int ret; @@ -2120,19 +2200,22 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, u64 end = addr + range; bool merge = !!va->gem.obj; + if (madvise && obj) + continue; + if (addr == req_addr) { merge &= obj == req_obj && offset == req_offset; if (end == req_end) { - ret = op_unmap_cb(ops, priv, va, merge); + ret = op_unmap_cb(ops, priv, va, merge, madvise); if (ret) return ret; break; } if (end < req_end) { - ret = op_unmap_cb(ops, priv, va, merge); + ret = op_unmap_cb(ops, priv, va, merge, madvise); if (ret) return ret; continue; @@ -2153,6 +2236,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, ret = op_remap_cb(ops, priv, NULL, &n, &u); if (ret) return ret; + + if (madvise) + op_map = req; break; } } else if (addr < req_addr) { @@ -2173,6 +2259,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, ret = op_remap_cb(ops, priv, &p, NULL, &u); if (ret) return ret; + + if (madvise) + op_map = req; break; } @@ -2180,6 +2269,18 @@ 
__drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, ret = op_remap_cb(ops, priv, &p, NULL, &u); if (ret) return ret; + + if (madvise) { + struct drm_gpuvm_map_req map_req = { + .map.va.addr = req_addr, + .map.va.range = end - req_addr, + }; + + ret = op_map_cb(ops, priv, &map_req); + if (ret) + return ret; + } + continue; } @@ -2195,6 +2296,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, ret = op_remap_cb(ops, priv, &p, &n, &u); if (ret) return ret; + + if (madvise) + op_map = req; break; } } else if (addr > req_addr) { @@ -2203,16 +2307,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, (addr - req_addr); if (end == req_end) { - ret = op_unmap_cb(ops, priv, va, merge); + ret = op_unmap_cb(ops, priv, va, merge, madvise); if (ret) return ret; + break; } if (end < req_end) { - ret = op_unmap_cb(ops, priv, va, merge); + ret = op_unmap_cb(ops, priv, va, merge, madvise); if (ret) return ret; + continue; } @@ -2231,14 +2337,20 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, ret = op_remap_cb(ops, priv, NULL, &n, &u); if (ret) return ret; + + if (madvise) { + struct drm_gpuvm_map_req map_req = { + .map.va.addr = addr, + .map.va.range = req_end - addr, + }; + + return op_map_cb(ops, priv, &map_req); + } break; } } } - - return op_map_cb(ops, priv, - req_addr, req_range, - req_obj, req_offset); + return op_map_cb(ops, priv, op_map); } static int @@ -2290,7 +2402,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, if (ret) return ret; } else { - ret = op_unmap_cb(ops, priv, va, false); + ret = op_unmap_cb(ops, priv, va, false, false); if (ret) return ret; } @@ -2303,10 +2415,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps * @gpuvm: the &drm_gpuvm representing the GPU VA space * @priv: pointer to a driver private data structure - * @req_addr: the start address of the new mapping - * @req_range: the range of the new mapping - * @req_obj: the &drm_gem_object to map - * @req_offset: the offset within the &drm_gem_object + 
* @req: ptr to struct drm_gpuvm_map_req * * This function iterates the given range of the GPU VA space. It utilizes the * &drm_gpuvm_ops to call back into the driver providing the split and merge @@ -2333,8 +2442,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, */ int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv, - u64 req_addr, u64 req_range, - struct drm_gem_object *req_obj, u64 req_offset) + const struct drm_gpuvm_map_req *req) { const struct drm_gpuvm_ops *ops = gpuvm->ops; @@ -2343,9 +2451,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv, ops->sm_step_unmap))) return -EINVAL; - return __drm_gpuvm_sm_map(gpuvm, ops, priv, - req_addr, req_range, - req_obj, req_offset); + return __drm_gpuvm_sm_map(gpuvm, ops, priv, req, false); } EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map); @@ -2421,10 +2527,7 @@ static const struct drm_gpuvm_ops lock_ops = { * @gpuvm: the &drm_gpuvm representing the GPU VA space * @exec: the &drm_exec locking context * @num_fences: for newly mapped objects, the # of fences to reserve - * @req_addr: the start address of the range to unmap - * @req_range: the range of the mappings to unmap - * @req_obj: the &drm_gem_object to map - * @req_offset: the offset within the &drm_gem_object + * @req: ptr to drm_gpuvm_map_req struct * * This function locks (drm_exec_lock_obj()) objects that will be unmapped/ * remapped, and locks+prepares (drm_exec_prepare_object()) objects that @@ -2445,9 +2548,7 @@ static const struct drm_gpuvm_ops lock_ops = { * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range); * break; * case DRIVER_OP_MAP: - * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, - * op->addr, op->range, - * obj, op->obj_offset); + * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req); * break; * } * @@ -2478,18 +2579,17 @@ static const struct drm_gpuvm_ops lock_ops = { int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec, unsigned int num_fences, - u64 req_addr, u64 
req_range, - struct drm_gem_object *req_obj, u64 req_offset) + struct drm_gpuvm_map_req *req) { + struct drm_gem_object *req_obj = req->map.gem.obj; + if (req_obj) { int ret = drm_exec_prepare_obj(exec, req_obj, num_fences); if (ret) return ret; } - return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, - req_addr, req_range, - req_obj, req_offset); + return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req, false); } EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock); @@ -2608,13 +2708,42 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = { .sm_step_unmap = drm_gpuva_sm_step, }; +static struct drm_gpuva_ops * +__drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm, + const struct drm_gpuvm_map_req *req, + bool madvise) +{ + struct drm_gpuva_ops *ops; + struct { + struct drm_gpuvm *vm; + struct drm_gpuva_ops *ops; + } args; + int ret; + + ops = kzalloc(sizeof(*ops), GFP_KERNEL); + if (unlikely(!ops)) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&ops->list); + + args.vm = gpuvm; + args.ops = ops; + + ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req, madvise); + if (ret) + goto err_free_ops; + + return ops; + +err_free_ops: + drm_gpuva_ops_free(gpuvm, ops); + return ERR_PTR(ret); +} + /** * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge * @gpuvm: the &drm_gpuvm representing the GPU VA space - * @req_addr: the start address of the new mapping - * @req_range: the range of the new mapping - * @req_obj: the &drm_gem_object to map - * @req_offset: the offset within the &drm_gem_object + * @req: map request arguments * * This function creates a list of operations to perform splitting and merging * of existent mapping(s) with the newly requested one. 
@@ -2642,39 +2771,49 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = { */ struct drm_gpuva_ops * drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm, - u64 req_addr, u64 req_range, - struct drm_gem_object *req_obj, u64 req_offset) + const struct drm_gpuvm_map_req *req) { - struct drm_gpuva_ops *ops; - struct { - struct drm_gpuvm *vm; - struct drm_gpuva_ops *ops; - } args; - int ret; - - ops = kzalloc(sizeof(*ops), GFP_KERNEL); - if (unlikely(!ops)) - return ERR_PTR(-ENOMEM); - - INIT_LIST_HEAD(&ops->list); - - args.vm = gpuvm; - args.ops = ops; - - ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, - req_addr, req_range, - req_obj, req_offset); - if (ret) - goto err_free_ops; - - return ops; - -err_free_ops: - drm_gpuva_ops_free(gpuvm, ops); - return ERR_PTR(ret); + return __drm_gpuvm_sm_map_ops_create(gpuvm, req, false); } EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create); +/** + * drm_gpuvm_madvise_ops_create() - creates the &drm_gpuva_ops to split + * @gpuvm: the &drm_gpuvm representing the GPU VA space + * @req: map request arguments + * + * This function creates a list of operations to perform splitting + * of existent mapping(s) at start or end, based on the request map. + * + * The list can be iterated with &drm_gpuva_for_each_op and must be processed + * in the given order. It can contain map and remap operations, but it + * also can be empty if no operation is required, e.g. if the requested mapping + * already exists in the exact same way. + * + * There will be no unmap operations, a maximum of two remap operations and two + * map operations. The two map operations correspond to: one from start to the + * end of drm_gpuvaX, and another from the start of drm_gpuvaY to end. + * + * Note that before calling this function again with another mapping request it + * is necessary to update the &drm_gpuvm's view of the GPU VA space. The + * previously obtained operations must be either processed or abandoned.
To + * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(), + * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be + * used. + * + * After the caller finished processing the returned &drm_gpuva_ops, they must + * be freed with &drm_gpuva_ops_free. + * + * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure + */ +struct drm_gpuva_ops * +drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm, + const struct drm_gpuvm_map_req *req) +{ + return __drm_gpuvm_sm_map_ops_create(gpuvm, req, true); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_madvise_ops_create); + /** * drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on * unmap diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index fe03915d0095..a712e177b350 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -1096,6 +1096,43 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data, } EXPORT_SYMBOL(mipi_dsi_dcs_read); +/** + * mipi_dsi_dcs_read_multi() - mipi_dsi_dcs_read() w/ accum_err + * @ctx: Context for multiple DSI transactions + * @cmd: DCS command + * @data: buffer in which to receive data + * @len: size of receive buffer + * + * Like mipi_dsi_dcs_read() but deals with errors in a way that makes it + * convenient to make several calls in a row. 
+ */ +void mipi_dsi_dcs_read_multi(struct mipi_dsi_multi_context *ctx, u8 cmd, + void *data, size_t len) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + struct mipi_dsi_msg msg = { + .channel = dsi->channel, + .type = MIPI_DSI_DCS_READ, + .tx_buf = &cmd, + .tx_len = 1, + .rx_buf = data, + .rx_len = len + }; + ssize_t ret; + + if (ctx->accum_err) + return; + + ret = mipi_dsi_device_transfer(dsi, &msg); + if (ret < 0) { + ctx->accum_err = ret; + dev_err(dev, "dcs read with command %#x failed: %d\n", cmd, + ctx->accum_err); + } +} +EXPORT_SYMBOL(mipi_dsi_dcs_read_multi); + /** * mipi_dsi_dcs_nop() - send DCS nop packet * @dsi: DSI peripheral device diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 2896fa7501b1..3d97990170bf 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -185,12 +185,17 @@ struct pvr_vm_bind_op { static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op) { switch (bind_op->type) { - case PVR_VM_BIND_TYPE_MAP: + case PVR_VM_BIND_TYPE_MAP: { + const struct drm_gpuvm_map_req map_req = { + .map.va.addr = bind_op->device_addr, + .map.va.range = bind_op->size, + .map.gem.obj = gem_from_pvr_gem(bind_op->pvr_obj), + .map.gem.offset = bind_op->offset, + }; + return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr, - bind_op, bind_op->device_addr, - bind_op->size, - gem_from_pvr_gem(bind_op->pvr_obj), - bind_op->offset); + bind_op, &map_req); + } case PVR_VM_BIND_TYPE_UNMAP: return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr, diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 3cd8562a5109..210604181c05 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -371,6 +371,12 @@ struct drm_gpuva * msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj, u64 offset, u64 range_start, u64 range_end) { + struct drm_gpuva_op_map op_map = { + .va.addr = 
range_start, + .va.range = range_end - range_start, + .gem.obj = obj, + .gem.offset = offset, + }; struct msm_gem_vm *vm = to_msm_vm(gpuvm); struct drm_gpuvm_bo *vm_bo; struct msm_gem_vma *vma; @@ -399,7 +405,7 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj, if (obj) GEM_WARN_ON((range_end - range_start) > obj->size); - drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset); + drm_gpuva_init_from_op(&vma->base, &op_map); vma->mapped = false; ret = drm_gpuva_insert(&vm->base, &vma->base); @@ -1171,11 +1177,17 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec) op->obj_offset); break; case MSM_VM_BIND_OP_MAP: - case MSM_VM_BIND_OP_MAP_NULL: - ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, - op->iova, op->range, - op->obj, op->obj_offset); + case MSM_VM_BIND_OP_MAP_NULL: { + struct drm_gpuvm_map_req map_req = { + .map.va.addr = op->iova, + .map.va.range = op->range, + .map.gem.obj = op->obj, + .map.gem.offset = op->obj_offset, + }; + + ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req); break; + } default: /* * lookup_op() should have already thrown an error for @@ -1282,10 +1294,17 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job) if (op->flags & MSM_VM_BIND_OP_DUMP) arg.flags |= MSM_VMA_DUMP; fallthrough; - case MSM_VM_BIND_OP_MAP_NULL: - ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova, - op->range, op->obj, op->obj_offset); + case MSM_VM_BIND_OP_MAP_NULL: { + struct drm_gpuvm_map_req map_req = { + .map.va.addr = op->iova, + .map.va.range = op->range, + .map.gem.obj = op->obj, + .map.gem.offset = op->obj_offset, + }; + + ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req); break; + } default: /* * lookup_op() should have already thrown an error for diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index ddfc46bc1b3e..d94a85509176 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -1276,6 
+1276,12 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job, break; case OP_MAP: { struct nouveau_uvma_region *reg; + struct drm_gpuvm_map_req map_req = { + .map.va.addr = op->va.addr, + .map.va.range = op->va.range, + .map.gem.obj = op->gem.obj, + .map.gem.offset = op->gem.offset, + }; reg = nouveau_uvma_region_find_first(uvmm, op->va.addr, @@ -1301,10 +1307,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job, } op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base, - op->va.addr, - op->va.range, - op->gem.obj, - op->gem.offset); + &map_req); if (IS_ERR(op->ops)) { ret = PTR_ERR(op->ops); goto unwind_continue; diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c index 17898a29efe8..561e6643dcbb 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c @@ -148,24 +148,20 @@ static inline struct nt35560 *panel_to_nt35560(struct drm_panel *panel) static int nt35560_set_brightness(struct backlight_device *bl) { struct nt35560 *nt = bl_get_data(bl); - struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); - int period_ns = 1023; + struct mipi_dsi_multi_context dsi_ctx = { + .dsi = to_mipi_dsi_device(nt->dev) + }; int duty_ns = bl->props.brightness; + int period_ns = 1023; u8 pwm_ratio; u8 pwm_div; - u8 par; - int ret; if (backlight_is_blank(bl)) { /* Disable backlight */ - par = 0x00; - ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, - &par, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to disable display backlight (%d)\n", ret); - return ret; - } - return 0; + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, + MIPI_DCS_WRITE_CONTROL_DISPLAY, + 0x00); + return dsi_ctx.accum_err; } /* Calculate the PWM duty cycle in n/256's */ @@ -176,12 +172,6 @@ static int nt35560_set_brightness(struct backlight_device *bl) /* Set up PWM dutycycle ONE byte (differs from the standard) */ dev_dbg(nt->dev, "calculated duty cycle %02x\n", pwm_ratio); - ret = 
mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, - &pwm_ratio, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to set display PWM ratio (%d)\n", ret); - return ret; - } /* * Sequence to write PWMDIV: @@ -192,46 +182,23 @@ static int nt35560_set_brightness(struct backlight_device *bl) * 0x22 PWMDIV * 0x7F 0xAA CMD2 page 1 lock */ - par = 0xaa; - ret = mipi_dsi_dcs_write(dsi, 0xf3, &par, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to unlock CMD 2 (%d)\n", ret); - return ret; - } - par = 0x01; - ret = mipi_dsi_dcs_write(dsi, 0x00, &par, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to enter page 1 (%d)\n", ret); - return ret; - } - par = 0x01; - ret = mipi_dsi_dcs_write(dsi, 0x7d, &par, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to disable MTP reload (%d)\n", ret); - return ret; - } - ret = mipi_dsi_dcs_write(dsi, 0x22, &pwm_div, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to set PWM divisor (%d)\n", ret); - return ret; - } - par = 0xaa; - ret = mipi_dsi_dcs_write(dsi, 0x7f, &par, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to lock CMD 2 (%d)\n", ret); - return ret; - } + mipi_dsi_dcs_write_var_seq_multi(&dsi_ctx, + MIPI_DCS_SET_DISPLAY_BRIGHTNESS, + pwm_ratio); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0xaa); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7d, 0x01); + + mipi_dsi_dcs_write_var_seq_multi(&dsi_ctx, 0x22, pwm_div); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0xaa); /* Enable backlight */ - par = 0x24; - ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, - &par, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to enable display backlight (%d)\n", ret); - return ret; - } + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, + 0x24); - return 0; + return dsi_ctx.accum_err; } static const struct backlight_ops nt35560_bl_ops = { @@ -244,32 +211,23 @@ static const struct backlight_properties nt35560_bl_props = { .max_brightness = 1023, }; 
-static int nt35560_read_id(struct nt35560 *nt) +static void nt35560_read_id(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); + struct device *dev = &dsi_ctx->dsi->dev; u8 vendor, version, panel; u16 val; - int ret; - ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID1, &vendor, 1); - if (ret < 0) { - dev_err(nt->dev, "could not vendor ID byte\n"); - return ret; - } - ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID2, &version, 1); - if (ret < 0) { - dev_err(nt->dev, "could not read device version byte\n"); - return ret; - } - ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID3, &panel, 1); - if (ret < 0) { - dev_err(nt->dev, "could not read panel ID byte\n"); - return ret; - } + mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID1, &vendor, 1); + mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID2, &version, 1); + mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID3, &panel, 1); + + if (dsi_ctx->accum_err < 0) + return; if (vendor == 0x00) { - dev_err(nt->dev, "device vendor ID is zero\n"); - return -ENODEV; + dev_err(dev, "device vendor ID is zero\n"); + dsi_ctx->accum_err = -ENODEV; + return; } val = (vendor << 8) | panel; @@ -278,16 +236,16 @@ static int nt35560_read_id(struct nt35560 *nt) case DISPLAY_SONY_ACX424AKP_ID2: case DISPLAY_SONY_ACX424AKP_ID3: case DISPLAY_SONY_ACX424AKP_ID4: - dev_info(nt->dev, "MTP vendor: %02x, version: %02x, panel: %02x\n", + dev_info(dev, + "MTP vendor: %02x, version: %02x, panel: %02x\n", vendor, version, panel); break; default: - dev_info(nt->dev, "unknown vendor: %02x, version: %02x, panel: %02x\n", + dev_info(dev, + "unknown vendor: %02x, version: %02x, panel: %02x\n", vendor, version, panel); break; } - - return 0; } static int nt35560_power_on(struct nt35560 *nt) @@ -322,92 +280,56 @@ static void nt35560_power_off(struct nt35560 *nt) static int nt35560_prepare(struct drm_panel *panel) { struct nt35560 *nt = panel_to_nt35560(panel); - struct mipi_dsi_device *dsi = 
to_mipi_dsi_device(nt->dev); - const u8 mddi = 3; + struct mipi_dsi_multi_context dsi_ctx = { + .dsi = to_mipi_dsi_device(nt->dev) + }; int ret; ret = nt35560_power_on(nt); if (ret) return ret; - ret = nt35560_read_id(nt); - if (ret) { - dev_err(nt->dev, "failed to read panel ID (%d)\n", ret); - goto err_power_off; - } + nt35560_read_id(&dsi_ctx); - /* Enabe tearing mode: send TE (tearing effect) at VBLANK */ - ret = mipi_dsi_dcs_set_tear_on(dsi, + /* Enable tearing mode: send TE (tearing effect) at VBLANK */ + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - if (ret) { - dev_err(nt->dev, "failed to enable vblank TE (%d)\n", ret); - goto err_power_off; - } /* * Set MDDI * * This presumably deactivates the Qualcomm MDDI interface and * selects DSI, similar code is found in other drivers such as the - * Sharp LS043T1LE01 which makes us suspect that this panel may be - * using a Novatek NT35565 or similar display driver chip that shares - * this command. Due to the lack of documentation we cannot know for - * sure. + * Sharp LS043T1LE01. 
*/ - ret = mipi_dsi_dcs_write(dsi, NT35560_DCS_SET_MDDI, - &mddi, sizeof(mddi)); - if (ret < 0) { - dev_err(nt->dev, "failed to set MDDI (%d)\n", ret); - goto err_power_off; - } + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, NT35560_DCS_SET_MDDI, 3); - /* Exit sleep mode */ - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret) { - dev_err(nt->dev, "failed to exit sleep mode (%d)\n", ret); - goto err_power_off; - } - msleep(140); + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 140); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret) { - dev_err(nt->dev, "failed to turn display on (%d)\n", ret); - goto err_power_off; - } + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); if (nt->video_mode) { - /* In video mode turn peripheral on */ - ret = mipi_dsi_turn_on_peripheral(dsi); - if (ret) { - dev_err(nt->dev, "failed to turn on peripheral\n"); - goto err_power_off; - } + mipi_dsi_turn_on_peripheral_multi(&dsi_ctx); } - return 0; - -err_power_off: - nt35560_power_off(nt); - return ret; + if (dsi_ctx.accum_err < 0) + nt35560_power_off(nt); + return dsi_ctx.accum_err; } static int nt35560_unprepare(struct drm_panel *panel) { struct nt35560 *nt = panel_to_nt35560(panel); - struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); - int ret; + struct mipi_dsi_multi_context dsi_ctx = { + .dsi = to_mipi_dsi_device(nt->dev) + }; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret) { - dev_err(nt->dev, "failed to turn display off (%d)\n", ret); - return ret; - } + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + + if (dsi_ctx.accum_err < 0) + return dsi_ctx.accum_err; - /* Enter sleep mode */ - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret) { - dev_err(nt->dev, "failed to enter sleep mode (%d)\n", ret); - return ret; - } msleep(85); nt35560_power_off(nt); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c index 
e91f50662997..7e2f4e043d62 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c @@ -7,7 +7,9 @@ #include #include #include +#include #include +#include #include #include