Files
linux/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
Tvrtko Ursulin 2bb026f3fb drm/xe: Rename XE_BO_FLAG_SCANOUT to XE_BO_FLAG_FORCE_WC
Rename XE_BO_FLAG_SCANOUT to XE_BO_FLAG_FORCE_WC so that the usage of the
flag can legitimately be expanded to more than just the actual frame-
buffer objects.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Suggested-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patch.msgid.link/20260324084018.20353-2-tvrtko.ursulin@igalia.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
2026-03-24 09:29:10 -04:00

103 lines
2.4 KiB
C

// SPDX-License-Identifier: MIT
/*
* Copyright 2023, Intel Corporation.
*/
#include <drm/intel/display_parent_interface.h>
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_dsb_buffer.h"
/*
 * Xe-side backing store for a display DSB (Display State Buffer).
 * Opaque to the display core; accessed only through the ops table below.
 */
struct intel_dsb_buffer {
	u32 *cmd_buf;		/* CPU view of the command buffer; in this file only sizeof(*cmd_buf) is used — presumably filled in elsewhere, TODO confirm */
	struct xe_bo *bo;	/* pinned, GGTT-mapped buffer object holding the DSB */
	size_t buf_size;	/* allocation size in bytes, as requested by the caller */
};
/* Return the GGTT address of the DSB's backing buffer object. */
static u32 xe_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
{
	struct xe_bo *bo = dsb_buf->bo;

	return xe_bo_ggtt_addr(bo);
}
/* Store one dword at dword-index @idx into the DSB buffer mapping. */
static void xe_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
{
	struct iosys_map *vmap = &dsb_buf->bo->vmap;

	iosys_map_wr(vmap, idx * 4, u32, val);
}
/* Load the dword at dword-index @idx from the DSB buffer mapping. */
static u32 xe_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
{
	struct iosys_map *vmap = &dsb_buf->bo->vmap;

	return iosys_map_rd(vmap, idx * 4, u32);
}
/*
 * memset() @size bytes of the DSB buffer to @val, starting at dword-index
 * @idx.  Warns (but still writes) if the fill would run past the end of
 * the buffer.
 */
static void xe_dsb_buffer_fill(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
{
	struct iosys_map *vmap = &dsb_buf->bo->vmap;

	/* Flag out-of-bounds fills; mirrors the original bounds check. */
	WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));

	iosys_map_memset(vmap, idx * 4, val, size);
}
/*
 * Allocate, pin and map a DSB backing object of at least @size bytes.
 *
 * Returns a new intel_dsb_buffer on success or an ERR_PTR on failure.
 * The caller owns the result and must release it with
 * xe_dsb_buffer_cleanup().
 */
static struct intel_dsb_buffer *xe_dsb_buffer_create(struct drm_device *drm, size_t size)
{
	struct xe_device *xe = to_xe_device(drm);
	struct intel_dsb_buffer *dsb_buf;
	struct xe_bo *obj;
	int ret;

	dsb_buf = kzalloc_obj(*dsb_buf);
	if (!dsb_buf)
		return ERR_PTR(-ENOMEM);

	/*
	 * Force a write-combined CPU mapping for the DSB (the flag was
	 * renamed from SCANOUT to FORCE_WC; this buffer is not a scanout
	 * surface), placed in VRAM on discrete parts and mapped in GGTT.
	 */
	obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe),
					PAGE_ALIGN(size),
					ttm_bo_type_kernel,
					XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
					XE_BO_FLAG_FORCE_WC |
					XE_BO_FLAG_GGTT,
					false);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_pin_map;
	}

	dsb_buf->bo = obj;
	dsb_buf->buf_size = size;

	return dsb_buf;

err_pin_map:
	kfree(dsb_buf);
	return ERR_PTR(ret);
}
/*
 * Release a buffer created by xe_dsb_buffer_create(): unpin/unmap the
 * backing object, then free the wrapper.  @dsb_buf must not be used
 * afterwards.
 */
static void xe_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
{
	xe_bo_unpin_map_no_vm(dsb_buf->bo);
	kfree(dsb_buf);
}
/*
 * Make prior CPU writes to the DSB mapping visible to the hardware
 * before the DSB is kicked off via MMIO.
 */
static void xe_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
{
	struct xe_device *xe = dsb_buf->bo->tile->xe;

	/*
	 * The memory barrier here is to ensure coherency of DSB vs MMIO,
	 * both for weak ordering archs and discrete cards.
	 */
	xe_device_wmb(xe);
	xe_device_l2_flush(xe);
}
/* DSB buffer ops exported to the shared display core via the parent interface. */
const struct intel_display_dsb_interface xe_display_dsb_interface = {
	.ggtt_offset = xe_dsb_buffer_ggtt_offset,
	.write = xe_dsb_buffer_write,
	.read = xe_dsb_buffer_read,
	.fill = xe_dsb_buffer_fill,
	.create = xe_dsb_buffer_create,
	.cleanup = xe_dsb_buffer_cleanup,
	.flush_map = xe_dsb_buffer_flush_map,
};