linux/drivers/gpu/drm/xe/xe_exec_queue.h
commit 1a2cf01e1c ("drm/xe: Remove last fence dependency check from binds and execs")
Author: Matthew Brost
Eliminate redundant last fence dependency checks in exec and bind jobs,
as they are now equivalent to xe_exec_queue_is_idle. Simplify the code
by removing this dead logic.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/20251031234050.3043507-7-matthew.brost@intel.com
Committed: 2025-11-04 08:21:18 -08:00
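
To make the change concrete, the following is a rough, hypothetical sketch of the kind of pattern the commit describes removing. It is not the driver's actual code: add_last_fence_dep(), sjob, q, and vm are stand-ins, only xe_exec_queue_last_fence_get() and xe_exec_queue_is_idle() come from the header below, and the sketch assumes <linux/dma-fence.h> and <drm/gpu_scheduler.h> for the fence and scheduler calls.

static int add_last_fence_dep(struct drm_sched_job *sjob,
                              struct xe_exec_queue *q, struct xe_vm *vm)
{
	struct dma_fence *fence = xe_exec_queue_last_fence_get(q, vm);

	/*
	 * The dependency check being removed: skipping an already-signaled
	 * last fence. Per the commit message, this test now carries the same
	 * information as xe_exec_queue_is_idle(q), so it is dead logic at
	 * the exec and bind call sites.
	 */
	if (dma_fence_is_signaled(fence)) {
		dma_fence_put(fence);
		return 0;
	}

	/* drm_sched_job_add_dependency() takes over the fence reference. */
	return drm_sched_job_add_dependency(sjob, fence);
}

With the check gone, the submission paths can simply hand the last fence to the scheduler, whose dependency handling copes with already-signaled fences on its own.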


/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */
#ifndef _XE_EXEC_QUEUE_H_
#define _XE_EXEC_QUEUE_H_
#include "xe_exec_queue_types.h"
#include "xe_vm_types.h"
struct drm_device;
struct drm_file;
struct xe_device;
struct xe_file;
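
/* Iterate __i over the TLB invalidation entries, from the primary GT index through the media GT index. */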
#define for_each_tlb_inval(__i) \
	for (__i = XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT; \
	     __i <= XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT; ++__i)

struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
                                           u32 logical_mask, u16 width,
                                           struct xe_hw_engine *hw_engine, u32 flags,
                                           u64 extensions);
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
                                                 struct xe_vm *vm,
                                                 enum xe_engine_class class,
                                                 u32 flags, u64 extensions);
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
                                                struct xe_tile *tile,
                                                u32 flags, u64 extensions);

void xe_exec_queue_fini(struct xe_exec_queue *q);
void xe_exec_queue_destroy(struct kref *ref);
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance);
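
/*
 * Take a reference on @q only if its refcount is still non-zero; returns @q
 * with the extra reference held, or NULL if the queue is already on its way
 * to destruction.
 */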
static inline struct xe_exec_queue *
xe_exec_queue_get_unless_zero(struct xe_exec_queue *q)
{
	if (kref_get_unless_zero(&q->refcount))
		return q;
	return NULL;
}

struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id);
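
/* Take an additional reference on @q; the caller must already hold one. */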
static inline struct xe_exec_queue *xe_exec_queue_get(struct xe_exec_queue *q)
{
	kref_get(&q->refcount);
	return q;
}
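
/* Drop a reference on @q; the queue is destroyed when the last reference goes away. */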
static inline void xe_exec_queue_put(struct xe_exec_queue *q)
{
	kref_put(&q->refcount, xe_exec_queue_destroy);
}
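
/* An exec queue is parallel when it is wider than a single engine instance. */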
static inline bool xe_exec_queue_is_parallel(struct xe_exec_queue *q)
{
	return q->width > 1;
}
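
/* True if the exec queue was created with a PXP session type set. */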
static inline bool xe_exec_queue_uses_pxp(struct xe_exec_queue *q)
{
	return q->pxp.type;
}

bool xe_exec_queue_is_lr(struct xe_exec_queue *q);
bool xe_exec_queue_is_idle(struct xe_exec_queue *q);
void xe_exec_queue_kill(struct xe_exec_queue *q);

int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file);
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file);
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file);
enum xe_exec_queue_priority xe_exec_queue_device_get_max_priority(struct xe_device *xe);
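
/*
 * Last-fence tracking: get, set, and drop the exec queue's most recently
 * installed fence for a given VM; the _unlocked put variant is for paths
 * (e.g. teardown) where the usual locking requirements do not apply.
 */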
void xe_exec_queue_last_fence_put(struct xe_exec_queue *e, struct xe_vm *vm);
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *e);
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e,
                                               struct xe_vm *vm);
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *e,
                                                           struct xe_vm *vm);
void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
                                  struct dma_fence *fence);
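
/*
 * Per-type variants of the last-fence helpers for TLB invalidation fences;
 * @type is expected to be one of the indices iterated by for_each_tlb_inval().
 */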
void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
                                            struct xe_vm *vm,
                                            unsigned int type);
void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q,
                                                     unsigned int type);
struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q,
                                                          struct xe_vm *vm,
                                                          unsigned int type);
void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q,
                                            struct xe_vm *vm,
                                            struct dma_fence *fence,
                                            unsigned int type);

void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);
int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch);
struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q);

#endif