mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-04-05 04:46:54 -04:00
Currently this is very broken if someone attempts to create a bind
queue and share it across multiple VMs. For example currently we assume
it is safe to acquire the user VM lock to protect some of the bind queue
state, but if we allow sharing the bind queue with multiple VMs then this
quickly breaks down.
To fix this reject using a bind queue with any VM that is not the same
VM that was originally passed when creating the bind queue. This is a uAPI
change, however this was more of an oversight on kernel side that we
didn't reject this, and expectation is that userspace shouldn't be using
bind queues in this way, so in theory this change should go unnoticed.
Based on a patch from Matt Brost.
v2 (Matt B):
- Hold the vm lock over queue create, to ensure it can't be closed as
we attach the user_vm to the queue.
- Make sure we actually check for NULL user_vm in destruction path.
v3:
- Fix error path handling.
Fixes: dd08ebf6c3 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Reported-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: José Roberto de Souza <jose.souza@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Michal Mrozek <michal.mrozek@intel.com>
Cc: Carl Zhang <carl.zhang@intel.com>
Cc: <stable@vger.kernel.org> # v6.8+
Acked-by: José Roberto de Souza <jose.souza@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Arvind Yadav <arvind.yadav@intel.com>
Acked-by: Michal Mrozek <michal.mrozek@intel.com>
Link: https://patch.msgid.link/20260120110609.77958-3-matthew.auld@intel.com
(cherry picked from commit 9dd08fdecc0c98d6516c2d2d1fa189c1332f8dab)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
116 lines
3.5 KiB
C
/* SPDX-License-Identifier: MIT */
|
|
/*
|
|
* Copyright © 2021 Intel Corporation
|
|
*/
|
|
|
|
#ifndef _XE_EXEC_QUEUE_H_
|
|
#define _XE_EXEC_QUEUE_H_
|
|
|
|
#include "xe_exec_queue_types.h"
|
|
#include "xe_vm_types.h"
|
|
|
|
struct drm_device;
|
|
struct drm_file;
|
|
struct xe_device;
|
|
struct xe_file;
|
|
|
|
/*
 * Iterate @__i over the TLB-invalidation slots of an exec queue, from the
 * primary GT slot up to and including the media GT slot.
 */
#define for_each_tlb_inval(__i) \
	for (__i = XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT; \
	     __i <= XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT; ++__i)
|
|
|
|
/*
 * Exec queue constructors. Each returns a new queue with a reference held;
 * error convention (NULL vs ERR_PTR) should be confirmed at the definitions.
 */
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hw_engine, u32 flags,
					   u64 extensions);
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class,
						 u32 flags, u64 extensions);
/*
 * Create a bind queue for @tile. @user_vm is the VM the queue is tied to
 * (presumably NULL for kernel-internal bind queues — confirm at definition);
 * per the cross-VM bind-queue fix, the queue may only be used with this VM.
 */
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
						struct xe_tile *tile,
						struct xe_vm *user_vm,
						u32 flags, u64 extensions);

void xe_exec_queue_fini(struct xe_exec_queue *q);
/*
 * kref release callback for the queue's refcount (passed to kref_put() by
 * xe_exec_queue_put()); callers should use xe_exec_queue_put(), not this.
 */
void xe_exec_queue_destroy(struct kref *ref);
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance);
|
|
|
|
static inline struct xe_exec_queue *
|
|
xe_exec_queue_get_unless_zero(struct xe_exec_queue *q)
|
|
{
|
|
if (kref_get_unless_zero(&q->refcount))
|
|
return q;
|
|
|
|
return NULL;
|
|
}
|
|
|
|
/*
 * Look up the exec queue with userspace @id under file @xef; presumably
 * returns the queue with a reference held, or NULL — confirm at definition.
 */
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id);
|
|
|
|
/*
 * Take an additional reference on @q and return it. Standard kref rules
 * apply: the caller must already hold a valid reference (otherwise use
 * xe_exec_queue_get_unless_zero()).
 */
static inline struct xe_exec_queue *xe_exec_queue_get(struct xe_exec_queue *q)
{
	kref_get(&q->refcount);
	return q;
}
|
|
|
|
/*
 * Drop a reference on @q; the final put releases the queue via
 * xe_exec_queue_destroy().
 */
static inline void xe_exec_queue_put(struct xe_exec_queue *q)
{
	kref_put(&q->refcount, xe_exec_queue_destroy);
}
|
|
|
|
static inline bool xe_exec_queue_is_parallel(struct xe_exec_queue *q)
|
|
{
|
|
return q->width > 1;
|
|
}
|
|
|
|
static inline bool xe_exec_queue_uses_pxp(struct xe_exec_queue *q)
|
|
{
|
|
return q->pxp.type;
|
|
}
|
|
|
|
/* Queue state queries — LR presumably means long-running; confirm at definitions. */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q);

bool xe_exec_queue_is_idle(struct xe_exec_queue *q);

void xe_exec_queue_kill(struct xe_exec_queue *q);

/* DRM_IOCTL_XE_EXEC_QUEUE_* handlers; return 0 or negative errno. */
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file);
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file);
enum xe_exec_queue_priority xe_exec_queue_device_get_max_priority(struct xe_device *xe);
|
|
|
|
/*
 * Last-fence tracking: get/put/set the most recent fence associated with an
 * exec queue for a given VM. The *_unlocked variant presumably skips the VM
 * lock assertion — confirm against the definitions.
 *
 * Note: parameter renamed from the legacy 'e' to 'q' for consistency with
 * every other declaration in this header (prototype-only change; no effect
 * on callers or on the out-of-line definitions).
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm);
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q);
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm);
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
							  struct xe_vm *vm);
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence);
|
|
|
|
/*
 * Per-slot TLB-invalidation last-fence tracking; @type selects the slot
 * (XE_EXEC_QUEUE_TLB_INVAL_* — see for_each_tlb_inval()). The *_unlocked
 * variant presumably skips the VM lock assertion — confirm at definitions.
 */
void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
					    struct xe_vm *vm,
					    unsigned int type);

void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q,
						     unsigned int type);

struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q,
							 struct xe_vm *vm,
							 unsigned int type);

void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q,
					    struct xe_vm *vm,
					    struct dma_fence *fence,
					    unsigned int type);
|
|
|
|
/* Refresh the queue's accumulated run ticks (presumably for fdinfo/usage accounting — confirm). */
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);

/* Rebase HWSP references in the queue's contexts using @scratch; returns 0 or negative errno — confirm at definition. */
int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch);

/* Return an LRC associated with @q — which one (e.g. first of a parallel set) is defined at the implementation. */
struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q);
|
|
|
|
#endif
|