Merge tag 'drm-misc-next-2025-08-21' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next

drm-misc-next for v6.18:

Core Changes:

bridge:
- Support Content Protection property

gpuvm:
- Support madvise in Xe driver

mipi:
- Add more multi-read/write helpers for improved error handling

Driver Changes:

amdxdna:
- Refactor hardware context handling

bridge:
- display-connector: Improve DP display detection

panel:
- Fix includes in various drivers

panthor:
- Add support for Mali G710, G510, G310, Gx15, Gx20, Gx25
- Improve cache flushing

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://lore.kernel.org/r/20250821073822.GA45904@2a02-2454-fd5e-fd00-8f09-b5f-980b-a7ef.dyn6.pyur.net
This commit is contained in:
Dave Airlie
2025-08-25 06:38:18 +10:00
42 changed files with 861 additions and 562 deletions

View File

@@ -133,11 +133,20 @@ static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
dma_fence_put(fence);
}
static int aie2_hwctx_suspend_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
struct amdxdna_dev *xdna = hwctx->client->xdna;
aie2_hwctx_wait_for_idle(hwctx);
aie2_hwctx_stop(xdna, hwctx, NULL);
aie2_hwctx_status_shift_stop(hwctx);
return 0;
}
void aie2_hwctx_suspend(struct amdxdna_client *client)
{
struct amdxdna_dev *xdna = client->xdna;
struct amdxdna_hwctx *hwctx;
unsigned long hwctx_id;
/*
* Command timeout is unlikely. But if it happens, it doesn't
@@ -145,19 +154,20 @@ void aie2_hwctx_suspend(struct amdxdna_client *client)
* and abort all commands.
*/
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
guard(mutex)(&client->hwctx_lock);
amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
aie2_hwctx_wait_for_idle(hwctx);
aie2_hwctx_stop(xdna, hwctx, NULL);
aie2_hwctx_status_shift_stop(hwctx);
}
amdxdna_hwctx_walk(client, NULL, aie2_hwctx_suspend_cb);
}
void aie2_hwctx_resume(struct amdxdna_client *client)
static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
struct amdxdna_dev *xdna = hwctx->client->xdna;
aie2_hwctx_status_restore(hwctx);
return aie2_hwctx_restart(xdna, hwctx);
}
int aie2_hwctx_resume(struct amdxdna_client *client)
{
struct amdxdna_dev *xdna = client->xdna;
struct amdxdna_hwctx *hwctx;
unsigned long hwctx_id;
/*
* The resume path cannot guarantee that mailbox channel can be
@@ -165,11 +175,7 @@ void aie2_hwctx_resume(struct amdxdna_client *client)
* mailbox channel, error will return.
*/
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
guard(mutex)(&client->hwctx_lock);
amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
aie2_hwctx_status_restore(hwctx);
aie2_hwctx_restart(xdna, hwctx);
}
return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb);
}
static void

View File

@@ -290,18 +290,25 @@ int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u6
return 0;
}
static int amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx, void *arg)
{
u32 *bitmap = arg;
*bitmap |= GENMASK(hwctx->start_col + hwctx->num_col - 1, hwctx->start_col);
return 0;
}
int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
u32 size, u32 *cols_filled)
{
DECLARE_AIE2_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS);
struct amdxdna_dev *xdna = ndev->xdna;
struct amdxdna_client *client;
struct amdxdna_hwctx *hwctx;
unsigned long hwctx_id;
dma_addr_t dma_addr;
u32 aie_bitmap = 0;
u8 *buff_addr;
int ret, idx;
int ret;
buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
DMA_FROM_DEVICE, GFP_KERNEL);
@@ -309,12 +316,8 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
return -ENOMEM;
/* Go through each hardware context and mark the AIE columns that are active */
list_for_each_entry(client, &xdna->client_list, node) {
idx = srcu_read_lock(&client->hwctx_srcu);
amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
aie_bitmap |= amdxdna_hwctx_col_map(hwctx);
srcu_read_unlock(&client->hwctx_srcu, idx);
}
list_for_each_entry(client, &xdna->client_list, node)
amdxdna_hwctx_walk(client, &aie_bitmap, amdxdna_hwctx_col_map);
*cols_filled = 0;
req.dump_buff_addr = dma_addr;

View File

@@ -10,6 +10,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/iommu.h>
@@ -465,8 +466,11 @@ static int aie2_hw_resume(struct amdxdna_dev *xdna)
return ret;
}
list_for_each_entry(client, &xdna->client_list, node)
aie2_hwctx_resume(client);
list_for_each_entry(client, &xdna->client_list, node) {
ret = aie2_hwctx_resume(client);
if (ret)
break;
}
return ret;
}
@@ -779,65 +783,56 @@ static int aie2_get_clock_metadata(struct amdxdna_client *client,
return ret;
}
static int aie2_get_hwctx_status(struct amdxdna_client *client,
struct amdxdna_drm_get_info *args)
static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
struct amdxdna_drm_query_hwctx __user *buf;
struct amdxdna_dev *xdna = client->xdna;
struct amdxdna_drm_query_hwctx *tmp;
struct amdxdna_client *tmp_client;
struct amdxdna_hwctx *hwctx;
unsigned long hwctx_id;
bool overflow = false;
u32 req_bytes = 0;
u32 hw_i = 0;
int ret = 0;
int idx;
struct amdxdna_drm_query_hwctx __user *buf, *tmp __free(kfree) = NULL;
struct amdxdna_drm_get_info *get_info_args = arg;
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
if (get_info_args->buffer_size < sizeof(*tmp))
return -EINVAL;
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
buf = u64_to_user_ptr(args->buffer);
tmp->pid = hwctx->client->pid;
tmp->context_id = hwctx->id;
tmp->start_col = hwctx->start_col;
tmp->num_col = hwctx->num_col;
tmp->command_submissions = hwctx->priv->seq;
tmp->command_completions = hwctx->priv->completed;
buf = u64_to_user_ptr(get_info_args->buffer);
if (copy_to_user(buf, tmp, sizeof(*tmp)))
return -EFAULT;
get_info_args->buffer += sizeof(*tmp);
get_info_args->buffer_size -= sizeof(*tmp);
return 0;
}
static int aie2_get_hwctx_status(struct amdxdna_client *client,
struct amdxdna_drm_get_info *args)
{
struct amdxdna_dev *xdna = client->xdna;
struct amdxdna_drm_get_info info_args;
struct amdxdna_client *tmp_client;
int ret;
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
info_args.buffer = args->buffer;
info_args.buffer_size = args->buffer_size;
list_for_each_entry(tmp_client, &xdna->client_list, node) {
idx = srcu_read_lock(&tmp_client->hwctx_srcu);
amdxdna_for_each_hwctx(tmp_client, hwctx_id, hwctx) {
req_bytes += sizeof(*tmp);
if (args->buffer_size < req_bytes) {
/* Continue iterating to get the required size */
overflow = true;
continue;
}
memset(tmp, 0, sizeof(*tmp));
tmp->pid = tmp_client->pid;
tmp->context_id = hwctx->id;
tmp->start_col = hwctx->start_col;
tmp->num_col = hwctx->num_col;
tmp->command_submissions = hwctx->priv->seq;
tmp->command_completions = hwctx->priv->completed;
if (copy_to_user(&buf[hw_i], tmp, sizeof(*tmp))) {
ret = -EFAULT;
srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
goto out;
}
hw_i++;
}
srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
ret = amdxdna_hwctx_walk(tmp_client, &info_args, aie2_hwctx_status_cb);
if (ret)
break;
}
if (overflow) {
XDNA_ERR(xdna, "Invalid buffer size. Given: %u Need: %u.",
args->buffer_size, req_bytes);
ret = -EINVAL;
}
out:
kfree(tmp);
args->buffer_size = req_bytes;
args->buffer_size = (u32)(info_args.buffer - args->buffer);
return ret;
}

View File

@@ -289,7 +289,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx);
void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx);
int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
void aie2_hwctx_suspend(struct amdxdna_client *client);
void aie2_hwctx_resume(struct amdxdna_client *client);
int aie2_hwctx_resume(struct amdxdna_client *client);
int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo, unsigned long cur_seq);

View File

@@ -68,14 +68,30 @@ static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
synchronize_srcu(ss);
/* At this point, user is not able to submit new commands */
mutex_lock(&xdna->dev_lock);
xdna->dev_info->ops->hwctx_fini(hwctx);
mutex_unlock(&xdna->dev_lock);
kfree(hwctx->name);
kfree(hwctx);
}
int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
int (*walk)(struct amdxdna_hwctx *hwctx, void *arg))
{
struct amdxdna_hwctx *hwctx;
unsigned long hwctx_id;
int ret = 0, idx;
idx = srcu_read_lock(&client->hwctx_srcu);
amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
ret = walk(hwctx, arg);
if (ret)
break;
}
srcu_read_unlock(&client->hwctx_srcu, idx);
return ret;
}
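/*
 * Illustrative sketch, not part of this change: amdxdna_hwctx_walk() hides
 * the SRCU read lock and the xarray iteration behind a single call, so
 * callers only provide a callback matching the walk() signature above.
 * The counting callback below is hypothetical and exists only to show the
 * calling convention.
 */
static int example_count_hwctx_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
	u32 *count = arg;	/* caller-supplied accumulator */

	(*count)++;
	return 0;		/* a non-zero return stops the walk early */
}
/* Usage: u32 n = 0; amdxdna_hwctx_walk(client, &n, example_count_hwctx_cb); */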
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
{
struct amdxdna_cmd *cmd = abo->mem.kva;
@@ -126,16 +142,12 @@ void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
struct amdxdna_hwctx *hwctx;
unsigned long hwctx_id;
mutex_lock(&client->hwctx_lock);
amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
XDNA_DBG(client->xdna, "PID %d close HW context %d",
client->pid, hwctx->id);
xa_erase(&client->hwctx_xa, hwctx->id);
mutex_unlock(&client->hwctx_lock);
amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
mutex_lock(&client->hwctx_lock);
}
mutex_unlock(&client->hwctx_lock);
}
int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -225,6 +237,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d
if (!drm_dev_enter(dev, &idx))
return -ENODEV;
mutex_lock(&xdna->dev_lock);
hwctx = xa_erase(&client->hwctx_xa, args->handle);
if (!hwctx) {
ret = -EINVAL;
@@ -241,6 +254,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d
XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
out:
mutex_unlock(&xdna->dev_lock);
drm_dev_exit(idx);
return ret;
}

View File

@@ -139,14 +139,10 @@ amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo)
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);
static inline u32 amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx)
{
return GENMASK(hwctx->start_col + hwctx->num_col - 1,
hwctx->start_col);
}
void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
int (*walk)(struct amdxdna_hwctx *hwctx, void *arg));
int amdxdna_cmd_submit(struct amdxdna_client *client,
u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt,

View File

@@ -81,7 +81,6 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
ret = -ENODEV;
goto unbind_sva;
}
mutex_init(&client->hwctx_lock);
init_srcu_struct(&client->hwctx_srcu);
xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC);
mutex_init(&client->mm_lock);
@@ -116,7 +115,6 @@ static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
xa_destroy(&client->hwctx_xa);
cleanup_srcu_struct(&client->hwctx_srcu);
mutex_destroy(&client->hwctx_lock);
mutex_destroy(&client->mm_lock);
if (client->dev_heap)
drm_gem_object_put(to_gobj(client->dev_heap));
@@ -142,8 +140,8 @@ static int amdxdna_flush(struct file *f, fl_owner_t id)
mutex_lock(&xdna->dev_lock);
list_del_init(&client->node);
mutex_unlock(&xdna->dev_lock);
amdxdna_hwctx_remove_all(client);
mutex_unlock(&xdna->dev_lock);
drm_dev_exit(idx);
return 0;
@@ -330,11 +328,8 @@ static void amdxdna_remove(struct pci_dev *pdev)
struct amdxdna_client, node);
while (client) {
list_del_init(&client->node);
mutex_unlock(&xdna->dev_lock);
amdxdna_hwctx_remove_all(client);
mutex_lock(&xdna->dev_lock);
client = list_first_entry_or_null(&xdna->client_list,
struct amdxdna_client, node);
}

View File

@@ -116,8 +116,6 @@ struct amdxdna_device_id {
struct amdxdna_client {
struct list_head node;
pid_t pid;
struct mutex hwctx_lock; /* protect hwctx */
/* do NOT wait this srcu when hwctx_lock is held */
struct srcu_struct hwctx_srcu;
struct xarray hwctx_xa;
u32 next_hwctxid;

View File

@@ -2604,6 +2604,7 @@ static int anx7625_link_bridge(struct drm_dp_aux *aux)
platform->bridge.type = platform->pdata.panel_bridge ?
DRM_MODE_CONNECTOR_eDP :
DRM_MODE_CONNECTOR_DisplayPort;
platform->bridge.support_hdcp = true;
drm_bridge_add(&platform->bridge);

View File

@@ -373,7 +373,8 @@ static int display_connector_probe(struct platform_device *pdev)
if (conn->bridge.ddc)
conn->bridge.ops |= DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_DETECT;
if (conn->hpd_gpio)
/* Detecting the monitor requires reading DPCD */
if (conn->hpd_gpio && type != DRM_MODE_CONNECTOR_DisplayPort)
conn->bridge.ops |= DRM_BRIDGE_OP_DETECT;
if (conn->hpd_irq >= 0)
conn->bridge.ops |= DRM_BRIDGE_OP_HPD;

View File

@@ -20,6 +20,7 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/display/drm_hdcp_helper.h>
#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
@@ -641,6 +642,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
struct drm_bridge *bridge, *panel_bridge = NULL;
unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB);
unsigned int max_bpc = 8;
bool support_hdcp = false;
int connector_type;
int ret;
@@ -763,6 +765,9 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (drm_bridge_is_panel(bridge))
panel_bridge = bridge;
if (bridge->support_hdcp)
support_hdcp = true;
}
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -849,6 +854,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (panel_bridge)
drm_panel_bridge_set_orientation(connector, panel_bridge);
if (support_hdcp && IS_REACHABLE(CONFIG_DRM_DISPLAY_HELPER) &&
IS_ENABLED(CONFIG_DRM_DISPLAY_HDCP_HELPER))
drm_connector_attach_content_protection_property(connector, true);
return connector;
}
EXPORT_SYMBOL_GPL(drm_bridge_connector_init);

View File

@@ -270,6 +270,29 @@ npages_in_range(unsigned long start, unsigned long end)
return (end - start) >> PAGE_SHIFT;
}
/**
* drm_gpusvm_notifier_find() - Find GPU SVM notifier from GPU SVM
* @gpusvm: Pointer to the GPU SVM structure.
* @start: Start address of the notifier
* @end: End address of the notifier
*
* Return: A pointer to the drm_gpusvm_notifier if found or NULL
*/
struct drm_gpusvm_notifier *
drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start,
unsigned long end)
{
struct interval_tree_node *itree;
itree = interval_tree_iter_first(&gpusvm->root, start, end - 1);
if (itree)
return container_of(itree, struct drm_gpusvm_notifier, itree);
else
return NULL;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_notifier_find);
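/*
 * Illustrative sketch, not part of this change: now that the lookup is
 * exported, a driver can ask whether part of a range is already covered by
 * a notifier before allocating one. driver_range_is_tracked() is a
 * hypothetical helper name.
 */
static bool driver_range_is_tracked(struct drm_gpusvm *gpusvm,
				    unsigned long start, unsigned long end)
{
	/* The lookup interval is [start, end), matching the notifier tree. */
	return drm_gpusvm_notifier_find(gpusvm, start, end) != NULL;
}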
/**
* drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
* @notifier: Pointer to the GPU SVM notifier structure.
@@ -293,86 +316,6 @@ drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_find);
/**
* drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
* @range__: Iterator variable for the ranges
* @next__: Iterator variable for the ranges temporary storage
* @notifier__: Pointer to the GPU SVM notifier
* @start__: Start address of the range
* @end__: End address of the range
*
* This macro is used to iterate over GPU SVM ranges in a notifier while
* removing ranges from it.
*/
#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
(next__) = __drm_gpusvm_range_next(range__); \
(range__) && (drm_gpusvm_range_start(range__) < (end__)); \
(range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
/**
* __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
* @notifier: a pointer to the current drm_gpusvm_notifier
*
* Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
* the current notifier is the last one or if the input notifier is
* NULL.
*/
static struct drm_gpusvm_notifier *
__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
{
if (notifier && !list_is_last(&notifier->entry,
&notifier->gpusvm->notifier_list))
return list_next_entry(notifier, entry);
return NULL;
}
static struct drm_gpusvm_notifier *
notifier_iter_first(struct rb_root_cached *root, unsigned long start,
unsigned long last)
{
struct interval_tree_node *itree;
itree = interval_tree_iter_first(root, start, last);
if (itree)
return container_of(itree, struct drm_gpusvm_notifier, itree);
else
return NULL;
}
/**
* drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
* @notifier__: Iterator variable for the notifiers
* @notifier__: Pointer to the GPU SVM notifier
* @start__: Start address of the notifier
* @end__: End address of the notifier
*
* This macro is used to iterate over GPU SVM notifiers in a gpusvm.
*/
#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1); \
(notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
(notifier__) = __drm_gpusvm_notifier_next(notifier__))
/**
* drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
* @notifier__: Iterator variable for the notifiers
* @next__: Iterator variable for the notifiers temporary storage
* @notifier__: Pointer to the GPU SVM notifier
* @start__: Start address of the notifier
* @end__: End address of the notifier
*
* This macro is used to iterate over GPU SVM notifiers in a gpusvm while
* removing notifiers from it.
*/
#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1), \
(next__) = __drm_gpusvm_notifier_next(notifier__); \
(notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
(notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
/**
* drm_gpusvm_notifier_invalidate() - Invalidate a GPU SVM notifier.
* @mni: Pointer to the mmu_interval_notifier structure.
@@ -472,22 +415,6 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
}
EXPORT_SYMBOL_GPL(drm_gpusvm_init);
/**
* drm_gpusvm_notifier_find() - Find GPU SVM notifier
* @gpusvm: Pointer to the GPU SVM structure
* @fault_addr: Fault address
*
* This function finds the GPU SVM notifier associated with the fault address.
*
* Return: Pointer to the GPU SVM notifier on success, NULL otherwise.
*/
static struct drm_gpusvm_notifier *
drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm,
unsigned long fault_addr)
{
return notifier_iter_first(&gpusvm->root, fault_addr, fault_addr + 1);
}
/**
* to_drm_gpusvm_notifier() - retrieve the container struct for a given rbtree node
* @node: a pointer to the rbtree node embedded within a drm_gpusvm_notifier struct
@@ -943,7 +870,7 @@ drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
if (!mmget_not_zero(mm))
return ERR_PTR(-EFAULT);
notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr);
notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr, fault_addr + 1);
if (!notifier) {
notifier = drm_gpusvm_notifier_alloc(gpusvm, fault_addr);
if (IS_ERR(notifier)) {
@@ -1107,7 +1034,8 @@ void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
drm_gpusvm_driver_lock_held(gpusvm);
notifier = drm_gpusvm_notifier_find(gpusvm,
drm_gpusvm_range_start(range));
drm_gpusvm_range_start(range),
drm_gpusvm_range_start(range) + 1);
if (WARN_ON_ONCE(!notifier))
return;

View File

@@ -420,6 +420,71 @@
* new: |-----------|-----| (b.bo_offset=m,a.bo_offset=n+2)
*/
/**
* DOC: Madvise Logic - Splitting and Traversal
*
* This logic handles GPU VA range updates by generating remap and map operations
* without performing unmaps or merging existing mappings.
*
* 1) The requested range lies entirely within a single drm_gpuva. The logic splits
* the existing mapping at the start and end boundaries and inserts a new map.
*
* ::
* a start end b
* pre: |-----------------------|
* drm_gpuva1
*
* a start end b
* new: |-----|=========|-------|
* remap map remap
*
* one REMAP and one MAP : Same behaviour as SPLIT and MERGE
*
* 2) The requested range spans multiple drm_gpuva regions. The logic traverses
* across boundaries, remapping the start and end segments, and inserting two
* map operations to cover the full range.
*
* :: a start b c end d
* pre: |------------------|--------------|------------------|
* drm_gpuva1 drm_gpuva2 drm_gpuva3
*
* a start b c end d
* new: |-------|==========|--------------|========|---------|
* remap1 map1 drm_gpuva2 map2 remap2
*
* two REMAPS and two MAPS
*
* 3) Either start or end lies within a drm_gpuva. A single remap and map operation
* are generated to update the affected portion.
*
*
* :: a/start b c end d
* pre: |------------------|--------------|------------------|
* drm_gpuva1 drm_gpuva2 drm_gpuva3
*
* a/start b c end d
* new: |------------------|--------------|========|---------|
* drm_gpuva1 drm_gpuva2 map1 remap1
*
* :: a start b c/end d
* pre: |------------------|--------------|------------------|
* drm_gpuva1 drm_gpuva2 drm_gpuva3
*
* a start b c/end d
* new: |-------|==========|--------------|------------------|
* remap1 map1 drm_gpuva2 drm_gpuva3
*
* one REMAP and one MAP
*
* 4) Both start and end align with existing drm_gpuva boundaries. No operations
* are needed as the range is already covered.
*
* 5) No existing drm_gpuvas. No operations.
*
* Unlike drm_gpuvm_sm_map_ops_create, this logic avoids unmaps and merging,
* focusing solely on remap and map operations for efficient traversal and update.
*/
/**
* DOC: Locking
*
@@ -486,13 +551,18 @@
* u64 addr, u64 range,
* struct drm_gem_object *obj, u64 offset)
* {
* struct drm_gpuvm_map_req map_req = {
* .map.va.addr = addr,
* .map.va.range = range,
* .map.gem.obj = obj,
* .map.gem.offset = offset,
* };
* struct drm_gpuva_ops *ops;
* struct drm_gpuva_op *op
* struct drm_gpuvm_bo *vm_bo;
*
* driver_lock_va_space();
* ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
* obj, offset);
* ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
* if (IS_ERR(ops))
* return PTR_ERR(ops);
*
@@ -2054,16 +2124,18 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
static int
op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
u64 addr, u64 range,
struct drm_gem_object *obj, u64 offset)
const struct drm_gpuvm_map_req *req)
{
struct drm_gpuva_op op = {};
if (!req)
return 0;
op.op = DRM_GPUVA_OP_MAP;
op.map.va.addr = addr;
op.map.va.range = range;
op.map.gem.obj = obj;
op.map.gem.offset = offset;
op.map.va.addr = req->map.va.addr;
op.map.va.range = req->map.va.range;
op.map.gem.obj = req->map.gem.obj;
op.map.gem.offset = req->map.gem.offset;
return fn->sm_step_map(&op, priv);
}
@@ -2088,10 +2160,13 @@ op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv,
static int
op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
struct drm_gpuva *va, bool merge)
struct drm_gpuva *va, bool merge, bool madvise)
{
struct drm_gpuva_op op = {};
if (madvise)
return 0;
op.op = DRM_GPUVA_OP_UNMAP;
op.unmap.va = va;
op.unmap.keep = merge;
@@ -2102,10 +2177,15 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
static int
__drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_ops *ops, void *priv,
u64 req_addr, u64 req_range,
struct drm_gem_object *req_obj, u64 req_offset)
const struct drm_gpuvm_map_req *req,
bool madvise)
{
struct drm_gem_object *req_obj = req->map.gem.obj;
const struct drm_gpuvm_map_req *op_map = madvise ? NULL : req;
struct drm_gpuva *va, *next;
u64 req_offset = req->map.gem.offset;
u64 req_range = req->map.va.range;
u64 req_addr = req->map.va.addr;
u64 req_end = req_addr + req_range;
int ret;
@@ -2120,19 +2200,22 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
u64 end = addr + range;
bool merge = !!va->gem.obj;
if (madvise && obj)
continue;
if (addr == req_addr) {
merge &= obj == req_obj &&
offset == req_offset;
if (end == req_end) {
ret = op_unmap_cb(ops, priv, va, merge);
ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
break;
}
if (end < req_end) {
ret = op_unmap_cb(ops, priv, va, merge);
ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
continue;
@@ -2153,6 +2236,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
if (madvise)
op_map = req;
break;
}
} else if (addr < req_addr) {
@@ -2173,6 +2259,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
if (madvise)
op_map = req;
break;
}
@@ -2180,6 +2269,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
if (madvise) {
struct drm_gpuvm_map_req map_req = {
.map.va.addr = req_addr,
.map.va.range = end - req_addr,
};
ret = op_map_cb(ops, priv, &map_req);
if (ret)
return ret;
}
continue;
}
@@ -2195,6 +2296,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, &n, &u);
if (ret)
return ret;
if (madvise)
op_map = req;
break;
}
} else if (addr > req_addr) {
@@ -2203,16 +2307,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
(addr - req_addr);
if (end == req_end) {
ret = op_unmap_cb(ops, priv, va, merge);
ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
break;
}
if (end < req_end) {
ret = op_unmap_cb(ops, priv, va, merge);
ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
continue;
}
@@ -2231,14 +2337,20 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
if (madvise) {
struct drm_gpuvm_map_req map_req = {
.map.va.addr = addr,
.map.va.range = req_end - addr,
};
return op_map_cb(ops, priv, &map_req);
}
break;
}
}
}
return op_map_cb(ops, priv,
req_addr, req_range,
req_obj, req_offset);
return op_map_cb(ops, priv, op_map);
}
static int
@@ -2290,7 +2402,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
if (ret)
return ret;
} else {
ret = op_unmap_cb(ops, priv, va, false);
ret = op_unmap_cb(ops, priv, va, false, false);
if (ret)
return ret;
}
@@ -2303,10 +2415,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
* drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @priv: pointer to a driver private data structure
* @req_addr: the start address of the new mapping
* @req_range: the range of the new mapping
* @req_obj: the &drm_gem_object to map
* @req_offset: the offset within the &drm_gem_object
* @req: ptr to struct drm_gpuvm_map_req
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the split and merge
@@ -2333,8 +2442,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
*/
int
drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
u64 req_addr, u64 req_range,
struct drm_gem_object *req_obj, u64 req_offset)
const struct drm_gpuvm_map_req *req)
{
const struct drm_gpuvm_ops *ops = gpuvm->ops;
@@ -2343,9 +2451,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
ops->sm_step_unmap)))
return -EINVAL;
return __drm_gpuvm_sm_map(gpuvm, ops, priv,
req_addr, req_range,
req_obj, req_offset);
return __drm_gpuvm_sm_map(gpuvm, ops, priv, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
@@ -2421,10 +2527,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @exec: the &drm_exec locking context
* @num_fences: for newly mapped objects, the # of fences to reserve
* @req_addr: the start address of the range to unmap
* @req_range: the range of the mappings to unmap
* @req_obj: the &drm_gem_object to map
* @req_offset: the offset within the &drm_gem_object
* @req: ptr to drm_gpuvm_map_req struct
*
* This function locks (drm_exec_lock_obj()) objects that will be unmapped/
* remapped, and locks+prepares (drm_exec_prepare_object()) objects that
@@ -2445,9 +2548,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
* break;
* case DRIVER_OP_MAP:
* ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
* op->addr, op->range,
* obj, op->obj_offset);
* ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req);
* break;
* }
*
@@ -2478,18 +2579,17 @@ static const struct drm_gpuvm_ops lock_ops = {
int
drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
struct drm_exec *exec, unsigned int num_fences,
u64 req_addr, u64 req_range,
struct drm_gem_object *req_obj, u64 req_offset)
struct drm_gpuvm_map_req *req)
{
struct drm_gem_object *req_obj = req->map.gem.obj;
if (req_obj) {
int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
if (ret)
return ret;
}
return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
req_addr, req_range,
req_obj, req_offset);
return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
@@ -2608,13 +2708,42 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
.sm_step_unmap = drm_gpuva_sm_step,
};
static struct drm_gpuva_ops *
__drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_map_req *req,
bool madvise)
{
struct drm_gpuva_ops *ops;
struct {
struct drm_gpuvm *vm;
struct drm_gpuva_ops *ops;
} args;
int ret;
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (unlikely(!ops))
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&ops->list);
args.vm = gpuvm;
args.ops = ops;
ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req, madvise);
if (ret)
goto err_free_ops;
return ops;
err_free_ops:
drm_gpuva_ops_free(gpuvm, ops);
return ERR_PTR(ret);
}
/**
* drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @req_addr: the start address of the new mapping
* @req_range: the range of the new mapping
* @req_obj: the &drm_gem_object to map
* @req_offset: the offset within the &drm_gem_object
* @req: map request arguments
*
* This function creates a list of operations to perform splitting and merging
* of existent mapping(s) with the newly requested one.
@@ -2642,39 +2771,49 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
*/
struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
u64 req_addr, u64 req_range,
struct drm_gem_object *req_obj, u64 req_offset)
const struct drm_gpuvm_map_req *req)
{
struct drm_gpuva_ops *ops;
struct {
struct drm_gpuvm *vm;
struct drm_gpuva_ops *ops;
} args;
int ret;
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (unlikely(!ops))
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&ops->list);
args.vm = gpuvm;
args.ops = ops;
ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
req_addr, req_range,
req_obj, req_offset);
if (ret)
goto err_free_ops;
return ops;
err_free_ops:
drm_gpuva_ops_free(gpuvm, ops);
return ERR_PTR(ret);
return __drm_gpuvm_sm_map_ops_create(gpuvm, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create);
/**
* drm_gpuvm_madvise_ops_create() - creates the &drm_gpuva_ops to split
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @req: map request arguments
*
* This function creates a list of operations to perform splitting
* of existent mapping(s) at start or end, based on the request map.
*
* The list can be iterated with &drm_gpuva_for_each_op and must be processed
* in the given order. It can contain map and remap operations, but it
* also can be empty if no operation is required, e.g. if the requested mapping
* already exists in the exact same way.
*
* There will be no unmap operations, a maximum of two remap operations and two
* map operations. The two map operations correspond to: one from start to the
* end of drm_gpuvaX, and another from the start of drm_gpuvaY to end.
*
* Note that before calling this function again with another mapping request it
* is necessary to update the &drm_gpuvm's view of the GPU VA space. The
* previously obtained operations must be either processed or abandoned. To
* update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
* drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
* used.
*
* After the caller finished processing the returned &drm_gpuva_ops, they must
* be freed with &drm_gpuva_ops_free.
*
* Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
*/
struct drm_gpuva_ops *
drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_map_req *req)
{
return __drm_gpuvm_sm_map_ops_create(gpuvm, req, true);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_madvise_ops_create);
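/*
 * Illustrative sketch, not part of this change: a caller of
 * drm_gpuvm_madvise_ops_create() only has to handle MAP and REMAP steps,
 * since this variant never generates unmaps. driver_apply_madvise() and the
 * per-op handling are hypothetical; only the drm_gpuvm/drm_gpuva calls are
 * real API.
 */
static int driver_apply_madvise(struct drm_gpuvm *gpuvm,
				const struct drm_gpuvm_map_req *req)
{
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;
	int ret = 0;

	ops = drm_gpuvm_madvise_ops_create(gpuvm, req);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		switch (op->op) {
		case DRM_GPUVA_OP_MAP:
			/* apply the new attributes to the mapped sub-range */
			break;
		case DRM_GPUVA_OP_REMAP:
			/* split the pre-existing mapping at the boundary */
			break;
		default:
			ret = -EINVAL;	/* unmaps are not expected here */
			break;
		}
		if (ret)
			break;
	}

	drm_gpuva_ops_free(gpuvm, ops);
	return ret;
}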
/**
* drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
* unmap

View File

@@ -1096,6 +1096,43 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
}
EXPORT_SYMBOL(mipi_dsi_dcs_read);
/**
* mipi_dsi_dcs_read_multi() - mipi_dsi_dcs_read() w/ accum_err
* @ctx: Context for multiple DSI transactions
* @cmd: DCS command
* @data: buffer in which to receive data
* @len: size of receive buffer
*
* Like mipi_dsi_dcs_read() but deals with errors in a way that makes it
* convenient to make several calls in a row.
*/
void mipi_dsi_dcs_read_multi(struct mipi_dsi_multi_context *ctx, u8 cmd,
void *data, size_t len)
{
struct mipi_dsi_device *dsi = ctx->dsi;
struct device *dev = &dsi->dev;
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.type = MIPI_DSI_DCS_READ,
.tx_buf = &cmd,
.tx_len = 1,
.rx_buf = data,
.rx_len = len
};
ssize_t ret;
if (ctx->accum_err)
return;
ret = mipi_dsi_device_transfer(dsi, &msg);
if (ret < 0) {
ctx->accum_err = ret;
dev_err(dev, "dcs read with command %#x failed: %d\n", cmd,
ctx->accum_err);
}
}
EXPORT_SYMBOL(mipi_dsi_dcs_read_multi);
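/*
 * Illustrative sketch, not part of this change: the *_multi() helpers
 * accumulate the first failure in ctx->accum_err and turn subsequent calls
 * into no-ops, so a probe path can chain several transfers and check the
 * error once. The 0xda/0xdb reads stand in for panel-specific ID registers
 * and are used here only for illustration.
 */
static int example_read_panel_ids(struct mipi_dsi_device *dsi, u8 *id1, u8 *id2)
{
	struct mipi_dsi_multi_context ctx = { .dsi = dsi };

	mipi_dsi_dcs_read_multi(&ctx, 0xda, id1, 1);
	mipi_dsi_dcs_read_multi(&ctx, 0xdb, id2, 1);

	return ctx.accum_err;	/* 0 on success, first error otherwise */
}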
/**
* mipi_dsi_dcs_nop() - send DCS nop packet
* @dsi: DSI peripheral device

View File

@@ -185,12 +185,17 @@ struct pvr_vm_bind_op {
static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
{
switch (bind_op->type) {
case PVR_VM_BIND_TYPE_MAP:
case PVR_VM_BIND_TYPE_MAP: {
const struct drm_gpuvm_map_req map_req = {
.map.va.addr = bind_op->device_addr,
.map.va.range = bind_op->size,
.map.gem.obj = gem_from_pvr_gem(bind_op->pvr_obj),
.map.gem.offset = bind_op->offset,
};
return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
bind_op, bind_op->device_addr,
bind_op->size,
gem_from_pvr_gem(bind_op->pvr_obj),
bind_op->offset);
bind_op, &map_req);
}
case PVR_VM_BIND_TYPE_UNMAP:
return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,

View File

@@ -371,6 +371,12 @@ struct drm_gpuva *
msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
u64 offset, u64 range_start, u64 range_end)
{
struct drm_gpuva_op_map op_map = {
.va.addr = range_start,
.va.range = range_end - range_start,
.gem.obj = obj,
.gem.offset = offset,
};
struct msm_gem_vm *vm = to_msm_vm(gpuvm);
struct drm_gpuvm_bo *vm_bo;
struct msm_gem_vma *vma;
@@ -399,7 +405,7 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
if (obj)
GEM_WARN_ON((range_end - range_start) > obj->size);
drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
drm_gpuva_init_from_op(&vma->base, &op_map);
vma->mapped = false;
ret = drm_gpuva_insert(&vm->base, &vma->base);
@@ -1171,11 +1177,17 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
op->obj_offset);
break;
case MSM_VM_BIND_OP_MAP:
case MSM_VM_BIND_OP_MAP_NULL:
ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
op->iova, op->range,
op->obj, op->obj_offset);
case MSM_VM_BIND_OP_MAP_NULL: {
struct drm_gpuvm_map_req map_req = {
.map.va.addr = op->iova,
.map.va.range = op->range,
.map.gem.obj = op->obj,
.map.gem.offset = op->obj_offset,
};
ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req);
break;
}
default:
/*
* lookup_op() should have already thrown an error for
@@ -1282,10 +1294,17 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
if (op->flags & MSM_VM_BIND_OP_DUMP)
arg.flags |= MSM_VMA_DUMP;
fallthrough;
case MSM_VM_BIND_OP_MAP_NULL:
ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
op->range, op->obj, op->obj_offset);
case MSM_VM_BIND_OP_MAP_NULL: {
struct drm_gpuvm_map_req map_req = {
.map.va.addr = op->iova,
.map.va.range = op->range,
.map.gem.obj = op->obj,
.map.gem.offset = op->obj_offset,
};
ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);
break;
}
default:
/*
* lookup_op() should have already thrown an error for

View File

@@ -1276,6 +1276,12 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
break;
case OP_MAP: {
struct nouveau_uvma_region *reg;
struct drm_gpuvm_map_req map_req = {
.map.va.addr = op->va.addr,
.map.va.range = op->va.range,
.map.gem.obj = op->gem.obj,
.map.gem.offset = op->gem.offset,
};
reg = nouveau_uvma_region_find_first(uvmm,
op->va.addr,
@@ -1301,10 +1307,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
}
op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
op->va.addr,
op->va.range,
op->gem.obj,
op->gem.offset);
&map_req);
if (IS_ERR(op->ops)) {
ret = PTR_ERR(op->ops);
goto unwind_continue;

View File

@@ -148,24 +148,20 @@ static inline struct nt35560 *panel_to_nt35560(struct drm_panel *panel)
static int nt35560_set_brightness(struct backlight_device *bl)
{
struct nt35560 *nt = bl_get_data(bl);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
int period_ns = 1023;
struct mipi_dsi_multi_context dsi_ctx = {
.dsi = to_mipi_dsi_device(nt->dev)
};
int duty_ns = bl->props.brightness;
int period_ns = 1023;
u8 pwm_ratio;
u8 pwm_div;
u8 par;
int ret;
if (backlight_is_blank(bl)) {
/* Disable backlight */
par = 0x00;
ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
&par, 1);
if (ret < 0) {
dev_err(nt->dev, "failed to disable display backlight (%d)\n", ret);
return ret;
}
return 0;
mipi_dsi_dcs_write_seq_multi(&dsi_ctx,
MIPI_DCS_WRITE_CONTROL_DISPLAY,
0x00);
return dsi_ctx.accum_err;
}
/* Calculate the PWM duty cycle in n/256's */
@@ -176,12 +172,6 @@ static int nt35560_set_brightness(struct backlight_device *bl)
/* Set up PWM dutycycle ONE byte (differs from the standard) */
dev_dbg(nt->dev, "calculated duty cycle %02x\n", pwm_ratio);
ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
&pwm_ratio, 1);
if (ret < 0) {
dev_err(nt->dev, "failed to set display PWM ratio (%d)\n", ret);
return ret;
}
/*
* Sequence to write PWMDIV:
@@ -192,46 +182,23 @@ static int nt35560_set_brightness(struct backlight_device *bl)
* 0x22 PWMDIV
* 0x7F 0xAA CMD2 page 1 lock
*/
par = 0xaa;
ret = mipi_dsi_dcs_write(dsi, 0xf3, &par, 1);
if (ret < 0) {
dev_err(nt->dev, "failed to unlock CMD 2 (%d)\n", ret);
return ret;
}
par = 0x01;
ret = mipi_dsi_dcs_write(dsi, 0x00, &par, 1);
if (ret < 0) {
dev_err(nt->dev, "failed to enter page 1 (%d)\n", ret);
return ret;
}
par = 0x01;
ret = mipi_dsi_dcs_write(dsi, 0x7d, &par, 1);
if (ret < 0) {
dev_err(nt->dev, "failed to disable MTP reload (%d)\n", ret);
return ret;
}
ret = mipi_dsi_dcs_write(dsi, 0x22, &pwm_div, 1);
if (ret < 0) {
dev_err(nt->dev, "failed to set PWM divisor (%d)\n", ret);
return ret;
}
par = 0xaa;
ret = mipi_dsi_dcs_write(dsi, 0x7f, &par, 1);
if (ret < 0) {
dev_err(nt->dev, "failed to lock CMD 2 (%d)\n", ret);
return ret;
}
mipi_dsi_dcs_write_var_seq_multi(&dsi_ctx,
MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
pwm_ratio);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0xaa);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x01);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7d, 0x01);
mipi_dsi_dcs_write_var_seq_multi(&dsi_ctx, 0x22, pwm_div);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0xaa);
/* Enable backlight */
par = 0x24;
ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
&par, 1);
if (ret < 0) {
dev_err(nt->dev, "failed to enable display backlight (%d)\n", ret);
return ret;
}
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
0x24);
return 0;
return dsi_ctx.accum_err;
}
static const struct backlight_ops nt35560_bl_ops = {
@@ -244,32 +211,23 @@ static const struct backlight_properties nt35560_bl_props = {
.max_brightness = 1023,
};
static int nt35560_read_id(struct nt35560 *nt)
static void nt35560_read_id(struct mipi_dsi_multi_context *dsi_ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
struct device *dev = &dsi_ctx->dsi->dev;
u8 vendor, version, panel;
u16 val;
int ret;
ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID1, &vendor, 1);
if (ret < 0) {
dev_err(nt->dev, "could not vendor ID byte\n");
return ret;
}
ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID2, &version, 1);
if (ret < 0) {
dev_err(nt->dev, "could not read device version byte\n");
return ret;
}
ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID3, &panel, 1);
if (ret < 0) {
dev_err(nt->dev, "could not read panel ID byte\n");
return ret;
}
mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID1, &vendor, 1);
mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID2, &version, 1);
mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID3, &panel, 1);
if (dsi_ctx->accum_err < 0)
return;
if (vendor == 0x00) {
dev_err(nt->dev, "device vendor ID is zero\n");
return -ENODEV;
dev_err(dev, "device vendor ID is zero\n");
dsi_ctx->accum_err = -ENODEV;
return;
}
val = (vendor << 8) | panel;
@@ -278,16 +236,16 @@ static int nt35560_read_id(struct nt35560 *nt)
case DISPLAY_SONY_ACX424AKP_ID2:
case DISPLAY_SONY_ACX424AKP_ID3:
case DISPLAY_SONY_ACX424AKP_ID4:
dev_info(nt->dev, "MTP vendor: %02x, version: %02x, panel: %02x\n",
dev_info(dev,
"MTP vendor: %02x, version: %02x, panel: %02x\n",
vendor, version, panel);
break;
default:
dev_info(nt->dev, "unknown vendor: %02x, version: %02x, panel: %02x\n",
dev_info(dev,
"unknown vendor: %02x, version: %02x, panel: %02x\n",
vendor, version, panel);
break;
}
return 0;
}
static int nt35560_power_on(struct nt35560 *nt)
@@ -322,92 +280,56 @@ static void nt35560_power_off(struct nt35560 *nt)
static int nt35560_prepare(struct drm_panel *panel)
{
struct nt35560 *nt = panel_to_nt35560(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
const u8 mddi = 3;
struct mipi_dsi_multi_context dsi_ctx = {
.dsi = to_mipi_dsi_device(nt->dev)
};
int ret;
ret = nt35560_power_on(nt);
if (ret)
return ret;
ret = nt35560_read_id(nt);
if (ret) {
dev_err(nt->dev, "failed to read panel ID (%d)\n", ret);
goto err_power_off;
}
nt35560_read_id(&dsi_ctx);
/* Enabe tearing mode: send TE (tearing effect) at VBLANK */
ret = mipi_dsi_dcs_set_tear_on(dsi,
/* Enable tearing mode: send TE (tearing effect) at VBLANK */
mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx,
MIPI_DSI_DCS_TEAR_MODE_VBLANK);
if (ret) {
dev_err(nt->dev, "failed to enable vblank TE (%d)\n", ret);
goto err_power_off;
}
/*
* Set MDDI
*
* This presumably deactivates the Qualcomm MDDI interface and
* selects DSI, similar code is found in other drivers such as the
* Sharp LS043T1LE01 which makes us suspect that this panel may be
* using a Novatek NT35565 or similar display driver chip that shares
* this command. Due to the lack of documentation we cannot know for
* sure.
* Sharp LS043T1LE01.
*/
ret = mipi_dsi_dcs_write(dsi, NT35560_DCS_SET_MDDI,
&mddi, sizeof(mddi));
if (ret < 0) {
dev_err(nt->dev, "failed to set MDDI (%d)\n", ret);
goto err_power_off;
}
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, NT35560_DCS_SET_MDDI, 3);
/* Exit sleep mode */
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret) {
dev_err(nt->dev, "failed to exit sleep mode (%d)\n", ret);
goto err_power_off;
}
msleep(140);
mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
mipi_dsi_msleep(&dsi_ctx, 140);
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret) {
dev_err(nt->dev, "failed to turn display on (%d)\n", ret);
goto err_power_off;
}
mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
if (nt->video_mode) {
/* In video mode turn peripheral on */
ret = mipi_dsi_turn_on_peripheral(dsi);
if (ret) {
dev_err(nt->dev, "failed to turn on peripheral\n");
goto err_power_off;
}
mipi_dsi_turn_on_peripheral_multi(&dsi_ctx);
}
return 0;
err_power_off:
nt35560_power_off(nt);
return ret;
if (dsi_ctx.accum_err < 0)
nt35560_power_off(nt);
return dsi_ctx.accum_err;
}
static int nt35560_unprepare(struct drm_panel *panel)
{
struct nt35560 *nt = panel_to_nt35560(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
int ret;
struct mipi_dsi_multi_context dsi_ctx = {
.dsi = to_mipi_dsi_device(nt->dev)
};
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret) {
dev_err(nt->dev, "failed to turn display off (%d)\n", ret);
return ret;
}
mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
if (dsi_ctx.accum_err < 0)
return dsi_ctx.accum_err;
/* Enter sleep mode */
ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
if (ret) {
dev_err(nt->dev, "failed to enter sleep mode (%d)\n", ret);
return ret;
}
msleep(85);
nt35560_power_off(nt);

View File

@@ -7,7 +7,9 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>

View File

@@ -203,7 +203,6 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
panfrost_gem_mapping_put(perfcnt->mapping);
perfcnt->mapping = NULL;
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
return 0;
@@ -279,7 +278,6 @@ void panfrost_perfcnt_close(struct drm_file *file_priv)
if (perfcnt->user == pfile)
panfrost_perfcnt_disable_locked(pfdev, file_priv);
mutex_unlock(&perfcnt->lock);
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
}

View File

@@ -8,6 +8,7 @@ panthor-y := \
panthor_gem.o \
panthor_gpu.o \
panthor_heap.o \
panthor_hw.o \
panthor_mmu.o \
panthor_sched.o

View File

@@ -18,6 +18,7 @@
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gpu.h"
#include "panthor_hw.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"
@@ -244,6 +245,10 @@ int panthor_device_init(struct panthor_device *ptdev)
return ret;
}
ret = panthor_hw_init(ptdev);
if (ret)
goto err_rpm_put;
ret = panthor_gpu_init(ptdev);
if (ret)
goto err_rpm_put;

View File

@@ -1402,3 +1402,8 @@ int panthor_fw_init(struct panthor_device *ptdev)
}
MODULE_FIRMWARE("arm/mali/arch10.8/mali_csffw.bin");
MODULE_FIRMWARE("arm/mali/arch10.10/mali_csffw.bin");
MODULE_FIRMWARE("arm/mali/arch10.12/mali_csffw.bin");
MODULE_FIRMWARE("arm/mali/arch11.8/mali_csffw.bin");
MODULE_FIRMWARE("arm/mali/arch12.8/mali_csffw.bin");
MODULE_FIRMWARE("arm/mali/arch13.8/mali_csffw.bin");

View File

@@ -35,40 +35,9 @@ struct panthor_gpu {
/** @reqs_acked: GPU request wait queue. */
wait_queue_head_t reqs_acked;
};
/**
* struct panthor_model - GPU model description
*/
struct panthor_model {
/** @name: Model name. */
const char *name;
/** @arch_major: Major version number of architecture. */
u8 arch_major;
/** @product_major: Major version number of product. */
u8 product_major;
};
/**
* GPU_MODEL() - Define a GPU model. A GPU product can be uniquely identified
* by a combination of the major architecture version and the major product
* version.
* @_name: Name for the GPU model.
* @_arch_major: Architecture major.
* @_product_major: Product major.
*/
#define GPU_MODEL(_name, _arch_major, _product_major) \
{\
.name = __stringify(_name), \
.arch_major = _arch_major, \
.product_major = _product_major, \
}
static const struct panthor_model gpu_models[] = {
GPU_MODEL(g610, 10, 7),
{},
/** @cache_flush_lock: Lock to serialize cache flushes */
struct mutex cache_flush_lock;
};
#define GPU_INTERRUPTS_MASK \
@@ -83,66 +52,6 @@ static void panthor_gpu_coherency_set(struct panthor_device *ptdev)
ptdev->coherent ? GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE);
}
static void panthor_gpu_init_info(struct panthor_device *ptdev)
{
const struct panthor_model *model;
u32 arch_major, product_major;
u32 major, minor, status;
unsigned int i;
ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID);
ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID);
ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID);
ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES);
ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES);
ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES);
ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES);
ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES);
ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES);
ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS);
ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE);
ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES);
for (i = 0; i < 4; i++)
ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i));
ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);
ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT);
ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT);
ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT);
arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id);
product_major = GPU_PROD_MAJOR(ptdev->gpu_info.gpu_id);
major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id);
minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id);
status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id);
for (model = gpu_models; model->name; model++) {
if (model->arch_major == arch_major &&
model->product_major == product_major)
break;
}
drm_info(&ptdev->base,
"mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
model->name ?: "unknown", ptdev->gpu_info.gpu_id >> 16,
major, minor, status);
drm_info(&ptdev->base,
"Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x",
ptdev->gpu_info.l2_features,
ptdev->gpu_info.tiler_features,
ptdev->gpu_info.mem_features,
ptdev->gpu_info.mmu_features,
ptdev->gpu_info.as_present);
drm_info(&ptdev->base,
"shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx",
ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present,
ptdev->gpu_info.tiler_present);
}
static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
gpu_write(ptdev, GPU_INT_CLEAR, status);
@@ -204,8 +113,8 @@ int panthor_gpu_init(struct panthor_device *ptdev)
spin_lock_init(&gpu->reqs_lock);
init_waitqueue_head(&gpu->reqs_acked);
mutex_init(&gpu->cache_flush_lock);
ptdev->gpu = gpu;
panthor_gpu_init_info(ptdev);
dma_set_max_seg_size(ptdev->base.dev, UINT_MAX);
pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
@@ -353,6 +262,9 @@ int panthor_gpu_flush_caches(struct panthor_device *ptdev,
bool timedout = false;
unsigned long flags;
/* Serialize cache flush operations. */
guard(mutex)(&ptdev->gpu->cache_flush_lock);
spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
if (!drm_WARN_ON(&ptdev->base,
ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {

View File

@@ -0,0 +1,125 @@
// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2025 ARM Limited. All rights reserved. */
#include "panthor_device.h"
#include "panthor_hw.h"
#include "panthor_regs.h"
#define GPU_PROD_ID_MAKE(arch_major, prod_major) \
(((arch_major) << 24) | (prod_major))
static char *get_gpu_model_name(struct panthor_device *ptdev)
{
const u32 gpu_id = ptdev->gpu_info.gpu_id;
const u32 product_id = GPU_PROD_ID_MAKE(GPU_ARCH_MAJOR(gpu_id),
GPU_PROD_MAJOR(gpu_id));
const bool ray_intersection = !!(ptdev->gpu_info.gpu_features &
GPU_FEATURES_RAY_INTERSECTION);
const u8 shader_core_count = hweight64(ptdev->gpu_info.shader_present);
switch (product_id) {
case GPU_PROD_ID_MAKE(10, 2):
return "Mali-G710";
case GPU_PROD_ID_MAKE(10, 3):
return "Mali-G510";
case GPU_PROD_ID_MAKE(10, 4):
return "Mali-G310";
case GPU_PROD_ID_MAKE(10, 7):
return "Mali-G610";
case GPU_PROD_ID_MAKE(11, 2):
if (shader_core_count > 10 && ray_intersection)
return "Mali-G715-Immortalis";
else if (shader_core_count >= 7)
return "Mali-G715";
fallthrough;
case GPU_PROD_ID_MAKE(11, 3):
return "Mali-G615";
case GPU_PROD_ID_MAKE(12, 0):
if (shader_core_count >= 10 && ray_intersection)
return "Mali-G720-Immortalis";
else if (shader_core_count >= 6)
return "Mali-G720";
fallthrough;
case GPU_PROD_ID_MAKE(12, 1):
return "Mali-G620";
case GPU_PROD_ID_MAKE(13, 0):
if (shader_core_count >= 10 && ray_intersection)
return "Mali-G925-Immortalis";
else if (shader_core_count >= 6)
return "Mali-G725";
fallthrough;
case GPU_PROD_ID_MAKE(13, 1):
return "Mali-G625";
}
return "(Unknown Mali GPU)";
}
static void panthor_gpu_info_init(struct panthor_device *ptdev)
{
unsigned int i;
ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID);
ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID);
ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID);
ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES);
ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES);
ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES);
ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES);
ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES);
ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES);
ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS);
ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE);
ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES);
for (i = 0; i < 4; i++)
ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i));
ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);
ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT);
ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT);
ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT);
/* Introduced in arch 11.x */
ptdev->gpu_info.gpu_features = gpu_read64(ptdev, GPU_FEATURES);
}
static void panthor_hw_info_init(struct panthor_device *ptdev)
{
u32 major, minor, status;
panthor_gpu_info_init(ptdev);
major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id);
minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id);
status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id);
drm_info(&ptdev->base,
"%s id 0x%x major 0x%x minor 0x%x status 0x%x",
get_gpu_model_name(ptdev), ptdev->gpu_info.gpu_id >> 16,
major, minor, status);
drm_info(&ptdev->base,
"Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x",
ptdev->gpu_info.l2_features,
ptdev->gpu_info.tiler_features,
ptdev->gpu_info.mem_features,
ptdev->gpu_info.mmu_features,
ptdev->gpu_info.as_present);
drm_info(&ptdev->base,
"shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx",
ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present,
ptdev->gpu_info.tiler_present);
}
int panthor_hw_init(struct panthor_device *ptdev)
{
panthor_hw_info_init(ptdev);
return 0;
}

View File

@@ -0,0 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 or MIT */
/* Copyright 2025 ARM Limited. All rights reserved. */
#ifndef __PANTHOR_HW_H__
#define __PANTHOR_HW_H__
struct panthor_device;
int panthor_hw_init(struct panthor_device *ptdev);
#endif /* __PANTHOR_HW_H__ */

View File

@@ -29,6 +29,7 @@
#include "panthor_device.h"
#include "panthor_gem.h"
#include "panthor_gpu.h"
#include "panthor_heap.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
@@ -568,6 +569,35 @@ static void lock_region(struct panthor_device *ptdev, u32 as_nr,
write_cmd(ptdev, as_nr, AS_COMMAND_LOCK);
}
static int mmu_hw_do_flush_on_gpu_ctrl(struct panthor_device *ptdev, int as_nr,
u32 op)
{
const u32 l2_flush_op = CACHE_CLEAN | CACHE_INV;
u32 lsc_flush_op = 0;
int ret;
if (op == AS_COMMAND_FLUSH_MEM)
lsc_flush_op = CACHE_CLEAN | CACHE_INV;
ret = wait_ready(ptdev, as_nr);
if (ret)
return ret;
ret = panthor_gpu_flush_caches(ptdev, l2_flush_op, lsc_flush_op, 0);
if (ret)
return ret;
/*
* Explicitly unlock the region as the AS is not unlocked automatically
* at the end of the GPU_CONTROL cache flush command, unlike
* AS_COMMAND_FLUSH_MEM or AS_COMMAND_FLUSH_PT.
*/
write_cmd(ptdev, as_nr, AS_COMMAND_UNLOCK);
/* Wait for the unlock command to complete */
return wait_ready(ptdev, as_nr);
}
static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
u64 iova, u64 size, u32 op)
{
@@ -585,6 +615,9 @@ static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
if (op != AS_COMMAND_UNLOCK)
lock_region(ptdev, as_nr, iova, size);
if (op == AS_COMMAND_FLUSH_MEM || op == AS_COMMAND_FLUSH_PT)
return mmu_hw_do_flush_on_gpu_ctrl(ptdev, as_nr, op);
/* Run the MMU operation */
write_cmd(ptdev, as_nr, op);
@@ -2169,15 +2202,22 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
mutex_lock(&vm->op_lock);
vm->op_ctx = op;
switch (op_type) {
case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: {
const struct drm_gpuvm_map_req map_req = {
.map.va.addr = op->va.addr,
.map.va.range = op->va.range,
.map.gem.obj = op->map.vm_bo->obj,
.map.gem.offset = op->map.bo_offset,
};
if (vm->unusable) {
ret = -EINVAL;
break;
}
ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
op->map.vm_bo->obj, op->map.bo_offset);
ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req);
break;
}
case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);

View File

@@ -70,6 +70,9 @@
#define GPU_PWR_OVERRIDE0 0x54
#define GPU_PWR_OVERRIDE1 0x58
#define GPU_FEATURES 0x60
#define GPU_FEATURES_RAY_INTERSECTION BIT(2)
#define GPU_TIMESTAMP_OFFSET 0x88
#define GPU_CYCLE_COUNT 0x90
#define GPU_TIMESTAMP 0x98

View File

@@ -131,9 +131,8 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
* in the plane update callback, and here we just check
 * whether we must force the modeset.
*/
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
if (drm_atomic_crtc_needs_modeset(crtc_state))
output->needs_modeset = true;
}
}
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {

View File

@@ -162,18 +162,18 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
vgdev->has_virgl_3d = true;
#endif
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID))
vgdev->has_edid = true;
}
if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC))
vgdev->has_indirect = true;
}
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID))
vgdev->has_resource_assign_uuid = true;
}
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB))
vgdev->has_resource_blob = true;
}
if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
if (!devm_request_mem_region(&vgdev->vdev->dev,
@@ -193,9 +193,9 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
(unsigned long)vgdev->host_visible_region.addr,
(unsigned long)vgdev->host_visible_region.len);
}
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) {
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT))
vgdev->has_context_init = true;
}
DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible",
vgdev->has_virgl_3d ? '+' : '-',

View File

@@ -47,6 +47,7 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
*resid = handle + 1;
} else {
int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
if (handle < 0)
return handle;
*resid = handle + 1;
@@ -56,9 +57,8 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
if (!virtio_gpu_virglrenderer_workaround) {
if (!virtio_gpu_virglrenderer_workaround)
ida_free(&vgdev->resource_ida, id - 1);
}
}
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)

View File

@@ -120,7 +120,7 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
crtc_state = drm_atomic_get_crtc_state(state,
new_plane_state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
return PTR_ERR(crtc_state);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_NO_SCALING,

View File

@@ -248,6 +248,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
struct virtio_gpu_ctrl_hdr *cmd;
cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
le32_to_cpu(resp->type),
@@ -468,6 +469,7 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
if (vbuf->data_size) {
if (is_vmalloc_addr(vbuf->data_buf)) {
int sg_ents;
sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
&sg_ents);
if (!sgt) {

View File

@@ -2316,10 +2316,17 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
switch (operation) {
case DRM_XE_VM_BIND_OP_MAP:
case DRM_XE_VM_BIND_OP_MAP_USERPTR:
ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
obj, bo_offset_or_userptr);
case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
struct drm_gpuvm_map_req map_req = {
.map.va.addr = addr,
.map.va.range = range,
.map.gem.obj = obj,
.map.gem.offset = bo_offset_or_userptr,
};
ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req);
break;
}
case DRM_XE_VM_BIND_OP_UNMAP:
ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
break;

View File

@@ -1171,6 +1171,10 @@ struct drm_bridge {
* before the peripheral.
*/
bool pre_enable_prev_first;
/**
 * @support_hdcp: Indicates that the bridge supports HDCP.
*/
bool support_hdcp;
/**
* @ddc: Associated I2C adapter for DDC access, if any.
*/
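For illustration only: a bridge driver that implements HDCP would advertise the capability by setting the new flag before registering the bridge. The probe-time snippet below is an assumption, not part of this patch; 'bridge' stands for the driver's struct drm_bridge.

	/* Hypothetical probe-time usage of the new flag. */
	bridge->support_hdcp = true;
	drm_bridge_add(bridge);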

View File

@@ -282,6 +282,10 @@ void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
unsigned long end);
struct drm_gpusvm_notifier *
drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start,
unsigned long end);
struct drm_gpusvm_range *
drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
unsigned long end);
@@ -434,4 +438,70 @@ __drm_gpusvm_range_next(struct drm_gpusvm_range *range)
(range__) && (drm_gpusvm_range_start(range__) < (end__)); \
(range__) = __drm_gpusvm_range_next(range__))
/**
* drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
* @range__: Iterator variable for the ranges
 * @next__: Iterator variable for the ranges' temporary storage
* @notifier__: Pointer to the GPU SVM notifier
* @start__: Start address of the range
* @end__: End address of the range
*
* This macro is used to iterate over GPU SVM ranges in a notifier while
* removing ranges from it.
*/
#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
(next__) = __drm_gpusvm_range_next(range__); \
(range__) && (drm_gpusvm_range_start(range__) < (end__)); \
(range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
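A minimal usage sketch for the safe range iterator, assuming a hypothetical helper that tears down every range overlapping an interval; drm_gpusvm_range_remove() is used only as an illustrative removal call.

static void example_teardown_ranges(struct drm_gpusvm *gpusvm,
				    struct drm_gpusvm_notifier *notifier,
				    unsigned long start, unsigned long end)
{
	struct drm_gpusvm_range *range, *next;

	/* 'next' keeps the iteration valid while 'range' is removed. */
	drm_gpusvm_for_each_range_safe(range, next, notifier, start, end)
		drm_gpusvm_range_remove(gpusvm, range);
}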
/**
* __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
* @notifier: a pointer to the current drm_gpusvm_notifier
*
* Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
* the current notifier is the last one or if the input notifier is
* NULL.
*/
static inline struct drm_gpusvm_notifier *
__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
{
if (notifier && !list_is_last(&notifier->entry,
&notifier->gpusvm->notifier_list))
return list_next_entry(notifier, entry);
return NULL;
}
/**
* drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
* @notifier__: Iterator variable for the notifiers
 * @gpusvm__: Pointer to the GPU SVM structure
* @start__: Start address of the notifier
* @end__: End address of the notifier
*
* This macro is used to iterate over GPU SVM notifiers in a gpusvm.
*/
#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)); \
(notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
(notifier__) = __drm_gpusvm_notifier_next(notifier__))
/**
* drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
* @notifier__: Iterator variable for the notifiers
 * @next__: Iterator variable for the notifiers' temporary storage
 * @gpusvm__: Pointer to the GPU SVM structure
* @start__: Start address of the notifier
* @end__: End address of the notifier
*
* This macro is used to iterate over GPU SVM notifiers in a gpusvm while
* removing notifiers from it.
*/
#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)), \
(next__) = __drm_gpusvm_notifier_next(notifier__); \
(notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
(notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
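A sketch of the non-destructive notifier walk, assuming a hypothetical helper that merely counts the notifiers intersecting an interval.

static unsigned int example_count_notifiers(struct drm_gpusvm *gpusvm,
					    unsigned long start,
					    unsigned long end)
{
	struct drm_gpusvm_notifier *notifier;
	unsigned int count = 0;

	drm_gpusvm_for_each_notifier(notifier, gpusvm, start, end)
		count++;

	return count;
}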
#endif /* __DRM_GPUSVM_H__ */

View File

@@ -160,15 +160,6 @@ struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);
static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
struct drm_gem_object *obj, u64 offset)
{
va->va.addr = addr;
va->va.range = range;
va->gem.obj = obj;
va->gem.offset = offset;
}
/**
* drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
* invalidated
@@ -1058,10 +1049,23 @@ struct drm_gpuva_ops {
*/
#define drm_gpuva_next_op(op) list_next_entry(op, entry)
/**
* struct drm_gpuvm_map_req - arguments passed to drm_gpuvm_sm_map[_ops_create]()
*/
struct drm_gpuvm_map_req {
/**
	 * @map: struct drm_gpuva_op_map
*/
struct drm_gpuva_op_map map;
};
struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
u64 addr, u64 range,
struct drm_gem_object *obj, u64 offset);
const struct drm_gpuvm_map_req *req);
struct drm_gpuva_ops *
drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_map_req *req);
struct drm_gpuva_ops *
drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
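Caller-side sketch of the new interface, mirroring the Xe and Panthor conversions above; 'gpuvm', 'addr', 'range', 'obj' and 'offset' stand in for whatever the driver already has at hand.

	struct drm_gpuvm_map_req req = {
		.map.va.addr    = addr,
		.map.va.range   = range,
		.map.gem.obj    = obj,
		.map.gem.offset = offset,
	};
	struct drm_gpuva_ops *ops;

	/* Split/merge steps are now derived from the request struct. */
	ops = drm_gpuvm_sm_map_ops_create(gpuvm, &req);
	if (IS_ERR(ops))
		return PTR_ERR(ops);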
@@ -1079,8 +1083,10 @@ void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
struct drm_gpuva_op_map *op)
{
drm_gpuva_init(va, op->va.addr, op->va.range,
op->gem.obj, op->gem.offset);
va->va.addr = op->va.addr;
va->va.range = op->va.range;
va->gem.obj = op->gem.obj;
va->gem.offset = op->gem.offset;
}
/**
@@ -1205,16 +1211,14 @@ struct drm_gpuvm_ops {
};
int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
u64 addr, u64 range,
struct drm_gem_object *obj, u64 offset);
const struct drm_gpuvm_map_req *req);
int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
u64 addr, u64 range);
int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
struct drm_exec *exec, unsigned int num_fences,
u64 req_addr, u64 req_range,
struct drm_gem_object *obj, u64 offset);
struct drm_gpuvm_map_req *req);
int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
u64 req_addr, u64 req_range);

View File

@@ -342,6 +342,8 @@ ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
const void *data, size_t len);
ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
size_t len);
void mipi_dsi_dcs_read_multi(struct mipi_dsi_multi_context *ctx, u8 cmd,
void *data, size_t len);
int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode);
@@ -403,6 +405,22 @@ void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx);
mipi_dsi_generic_write_multi(ctx, d, ARRAY_SIZE(d)); \
} while (0)
/**
* mipi_dsi_generic_write_var_seq_multi - transmit non-constant data using a
* generic write packet
*
* This macro will print errors for you and error handling is optimized for
* callers that call this multiple times in a row.
*
* @ctx: Context for multiple DSI transactions
* @seq: buffer containing the payload
*/
#define mipi_dsi_generic_write_var_seq_multi(ctx, seq...) \
do { \
const u8 d[] = { seq }; \
mipi_dsi_generic_write_multi(ctx, d, ARRAY_SIZE(d)); \
} while (0)
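Usage sketch under assumed names: unlike the constant-sequence variant, the payload may contain runtime values; the 0xb0 register address and the 'level' parameter are illustrative only.

static void example_set_level(struct mipi_dsi_multi_context *ctx, u8 level)
{
	/* Builds { 0xb0, level } on the stack and sends it as a generic write. */
	mipi_dsi_generic_write_var_seq_multi(ctx, 0xb0, level);
}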
/**
* mipi_dsi_dcs_write_seq_multi - transmit a DCS command with payload
*
@@ -419,6 +437,23 @@ void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx);
mipi_dsi_dcs_write_buffer_multi(ctx, d, ARRAY_SIZE(d)); \
} while (0)
/**
* mipi_dsi_dcs_write_var_seq_multi - transmit a DCS command with non-constant
* payload
*
* This macro will print errors for you and error handling is optimized for
* callers that call this multiple times in a row.
*
* @ctx: Context for multiple DSI transactions
* @cmd: Command
* @seq: buffer containing data to be transmitted
*/
#define mipi_dsi_dcs_write_var_seq_multi(ctx, cmd, seq...) \
do { \
const u8 d[] = { cmd, seq }; \
mipi_dsi_dcs_write_buffer_multi(ctx, d, ARRAY_SIZE(d)); \
} while (0)
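Usage sketch under assumed names: a DCS brightness update with a runtime payload; the two-byte split is an assumption about the panel in use.

static void example_set_brightness(struct mipi_dsi_multi_context *ctx,
				   u16 brightness)
{
	/* MIPI_DCS_SET_DISPLAY_BRIGHTNESS (0x51) comes from <video/mipi_display.h>. */
	mipi_dsi_dcs_write_var_seq_multi(ctx, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
					 brightness >> 8, brightness & 0xff);
}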
/**
* mipi_dsi_dual - send the same MIPI DSI command to two interfaces
*

View File

@@ -327,6 +327,9 @@ struct drm_panthor_gpu_info {
/** @pad: MBZ. */
__u32 pad;
/** @gpu_features: Bitmask describing supported GPU-wide features */
__u64 gpu_features;
};
/**

View File

@@ -10,7 +10,8 @@
error::from_err_ptr,
error::Result,
prelude::*,
types::{ARef, AlwaysRefCounted, Opaque},
sync::aref::{ARef, AlwaysRefCounted},
types::Opaque,
};
use core::{mem, ops::Deref, ptr, ptr::NonNull};

View File

@@ -8,7 +8,7 @@
bindings, device, devres, drm,
error::{to_result, Result},
prelude::*,
types::ARef,
sync::aref::ARef,
};
use macros::vtable;

View File

@@ -10,7 +10,8 @@
drm::driver::{AllocImpl, AllocOps},
error::{to_result, Result},
prelude::*,
types::{ARef, AlwaysRefCounted, Opaque},
sync::aref::{ARef, AlwaysRefCounted},
types::Opaque,
};
use core::{mem, ops::Deref, ptr::NonNull};