Merge tag 'drm-misc-next-2023-03-07' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for v6.4-rc1:

Note: Only changes since the pull request from 2023-02-23 are included here.

UAPI Changes:
- Convert rockchip bindings to YAML.
- Constify kobj_type structure in dma-buf.
- Fbdev cmdline parser fixes, and other small fbdev fixes for mode
   parsing.

Cross-subsystem Changes:
- Add Neil Armstrong as Linaro maintainer.
- Actually signal the private stub dma-fence.

Core Changes:
- Add a function for adding a syncobj dependency to a sched_job, and use it in
  panfrost and v3d (a usage sketch follows this list).
- Improve DisplayID 2.0 topology parsing and EDID parsing in general.
- Add a GEM eviction function and callback for generic GEM shrinker
  purposes.
- Prepare to convert the shmem helper to use the GEM reservation lock instead of
  its own locking. (The actual conversion commit has been reverted for now.)
- Move the suballocator from the radeon and amdgpu drivers to core in preparation
  for Xe (see the API sketch after this list).
- Assorted small fixes and documentation.
- Fixes to HPD polling.
- Assorted small fixes in simpledrm, bridge, accel, shmem-helper,
   and the format-helper selftests.
- Remove the dummy resource when a TTM BO is created and during pipelined
   gutting; fix all drivers to accept a NULL ttm_bo->resource.
- Prevent moving of pinned BOs in the TTM core.
- Set the drm panel-bridge orientation before the connector is registered.
- Remove dumb_destroy callback.
- Add documentation for the GEM_CLOSE, PRIME_HANDLE_TO_FD, PRIME_FD_TO_HANDLE, and GETFB2 ioctls.
- Add an atomic enable_plane callback and use it in ast, mgag200, and tidss.
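
As a rough illustration of the new syncobj helper mentioned above, here is a
sketch of how a driver ioctl path might use it; this is not code from this
pull, the wrapper is made up, and the exact signature (in particular the
trailing timeline-point argument) is an assumption:

	/*
	 * Hedged sketch: resolve a userspace syncobj handle into a scheduler
	 * dependency, the way panfrost/v3d now do via the new helper.
	 */
	#include <drm/gpu_scheduler.h>
	#include <drm/drm_file.h>

	static int example_add_in_sync(struct drm_sched_job *job,
				       struct drm_file *file_priv,
				       u32 syncobj_handle)
	{
		/*
		 * Looks up the syncobj's fence and adds it as a job
		 * dependency; the scheduler will not run the job before the
		 * fence signals. The final 0 is assumed to be the timeline
		 * point (0 for binary syncobjs).
		 */
		return drm_sched_job_add_syncobj_dependency(job, file_priv,
							    syncobj_handle, 0);
	}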
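
And a minimal sketch of the moved suballocator's calling convention, pieced
together from the amdgpu conversion in the diffs below; the example_pool type
and wrapper names are made up for illustration:

	#include <drm/drm_suballoc.h>
	#include <linux/dma-fence.h>

	/* Illustrative pool type; amdgpu embeds the manager the same way. */
	struct example_pool {
		struct drm_suballoc_manager base;
		void *cpu_ptr;		/* mapping of the backing buffer */
	};

	static void example_pool_init(struct example_pool *pool,
				      size_t size, size_t align)
	{
		/* Buffer size and default alignment are fixed here, once. */
		drm_suballoc_manager_init(&pool->base, size, align);
	}

	static struct drm_suballoc *example_suballoc(struct example_pool *pool,
						     size_t size)
	{
		/*
		 * Interruptible allocation that blocks until enough fenced
		 * ranges have been recycled; returns an ERR_PTR() on failure.
		 * The final 0 means "use the manager's default alignment".
		 */
		return drm_suballoc_new(&pool->base, size, GFP_KERNEL, true, 0);
	}

	static void example_subfree(struct drm_suballoc *sa,
				    struct dma_fence *fence)
	{
		/* The range becomes reusable once @fence signals. */
		drm_suballoc_free(sa, fence);
	}

	static void example_pool_fini(struct example_pool *pool)
	{
		drm_suballoc_manager_fini(&pool->base);
	}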

Driver Changes:
- Use drm_gem_objects_lookup in vc4.
- Assorted small fixes to virtio, ast, bridge/tc358762, meson, nouveau.
- Allow virtio KMS to be disabled and compiled out.
- Add Radxa 8/10HD and Samsung AMS495QA01 panels.
- Fix ivpu compiler errors.
- Assorted fixes to drm/panel, malidp, rockchip, ivpu, amdgpu, vgem,
   nouveau, vc4.
- Assorted cleanups, simplifications and fixes to vmwgfx.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ac1f5186-54bb-02f4-ac56-907f5b76f3de@linux.intel.com
Committed by Dave Airlie on 2023-03-14 12:17:27 +10:00.
155 changed files with 4430 additions and 3332 deletions.


@@ -10,13 +10,13 @@ menuconfig DRM
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
select DRM_PANEL_ORIENTATION_QUIRKS
select HDMI
select FB_CMDLINE
select I2C
select DMA_SHARED_BUFFER
select SYNC_FILE
# gallium uses SYS_kcmp for os_same_file_description() to de-duplicate
# device and dmabuf fd. Let's make sure that is available for our userspace.
select KCMP
select VIDEO_CMDLINE
select VIDEO_NOMODESET
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
@@ -232,6 +232,10 @@ config DRM_GEM_SHMEM_HELPER
help
Choose this if you need the GEM shmem helper functions
config DRM_SUBALLOC_HELPER
tristate
depends on DRM
config DRM_SCHED
tristate
depends on DRM


@@ -88,6 +88,9 @@ obj-$(CONFIG_DRM_GEM_DMA_HELPER) += drm_dma_helper.o
drm_shmem_helper-y := drm_gem_shmem_helper.o
obj-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_shmem_helper.o
drm_suballoc_helper-y := drm_suballoc.o
obj-$(CONFIG_DRM_SUBALLOC_HELPER) += drm_suballoc_helper.o
drm_vram_helper-y := drm_gem_vram_helper.o
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o


@@ -19,6 +19,7 @@ config DRM_AMDGPU
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
select DRM_BUDDY
select DRM_SUBALLOC_HELPER
# amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
# ACPI_VIDEO's dependencies must also be selected.
select INPUT if ACPI


@@ -424,29 +424,11 @@ struct amdgpu_clock {
* alignment).
*/
#define AMDGPU_SA_NUM_FENCE_LISTS 32
struct amdgpu_sa_manager {
wait_queue_head_t wq;
struct amdgpu_bo *bo;
struct list_head *hole;
struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
struct list_head olist;
unsigned size;
uint64_t gpu_addr;
void *cpu_ptr;
uint32_t domain;
uint32_t align;
};
/* sub-allocation buffer */
struct amdgpu_sa_bo {
struct list_head olist;
struct list_head flist;
struct amdgpu_sa_manager *manager;
unsigned soffset;
unsigned eoffset;
struct dma_fence *fence;
struct drm_suballoc_manager base;
struct amdgpu_bo *bo;
uint64_t gpu_addr;
void *cpu_ptr;
};
int amdgpu_fence_slab_init(void);


@@ -69,7 +69,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (size) {
r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
&ib->sa_bo, size, 256);
&ib->sa_bo, size);
if (r) {
dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
return r;
@@ -309,8 +309,7 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
AMDGPU_IB_POOL_SIZE,
AMDGPU_GPU_PAGE_SIZE,
AMDGPU_IB_POOL_SIZE, 256,
AMDGPU_GEM_DOMAIN_GTT);
if (r)
goto error;
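
Note the calling-convention change visible in this hunk: amdgpu_sa_bo_new() has lost its per-allocation alignment argument, and amdgpu_sa_bo_manager_init() now takes the suballocation alignment once per pool (256 bytes for the IB pools), while the backing BO itself is aligned to the GPU page size.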


@@ -600,7 +600,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
bo->tbo.resource->mem_type == TTM_PL_VRAM &&
bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
amdgpu_bo_in_cpu_visible_vram(bo))
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
@@ -1346,7 +1346,6 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
unsigned long offset;
int r;
/* Remember that this BO was accessed by the CPU */
@@ -1355,8 +1354,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
offset = bo->resource->start << PAGE_SHIFT;
if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
if (amdgpu_bo_in_cpu_visible_vram(abo))
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -1378,10 +1376,9 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
else if (unlikely(r))
return VM_FAULT_SIGBUS;
offset = bo->resource->start << PAGE_SHIFT;
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
(offset + bo->base.size) > adev->gmc.visible_vram_size)
!amdgpu_bo_in_cpu_visible_vram(abo))
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);


@@ -336,15 +336,22 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
/*
* sub allocation
*/
static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
static inline struct amdgpu_sa_manager *
to_amdgpu_sa_manager(struct drm_suballoc_manager *manager)
{
return sa_bo->manager->gpu_addr + sa_bo->soffset;
return container_of(manager, struct amdgpu_sa_manager, base);
}
static inline void * amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
static inline uint64_t amdgpu_sa_bo_gpu_addr(struct drm_suballoc *sa_bo)
{
return sa_bo->manager->cpu_ptr + sa_bo->soffset;
return to_amdgpu_sa_manager(sa_bo->manager)->gpu_addr +
drm_suballoc_soffset(sa_bo);
}
static inline void *amdgpu_sa_bo_cpu_addr(struct drm_suballoc *sa_bo)
{
return to_amdgpu_sa_manager(sa_bo->manager)->cpu_ptr +
drm_suballoc_soffset(sa_bo);
}
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
@@ -355,11 +362,11 @@ void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align);
struct drm_suballoc **sa_bo,
unsigned int size);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
struct amdgpu_sa_bo **sa_bo,
struct dma_fence *fence);
struct drm_suballoc **sa_bo,
struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);


@@ -27,6 +27,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>
#include <drm/drm_suballoc.h>
struct amdgpu_device;
struct amdgpu_ring;
@@ -92,7 +93,7 @@ enum amdgpu_ib_pool_type {
};
struct amdgpu_ib {
struct amdgpu_sa_bo *sa_bo;
struct drm_suballoc *sa_bo;
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;


@@ -44,327 +44,63 @@
#include "amdgpu.h"
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager,
unsigned size, u32 align, u32 domain)
unsigned int size, u32 suballoc_align, u32 domain)
{
int i, r;
int r;
init_waitqueue_head(&sa_manager->wq);
sa_manager->bo = NULL;
sa_manager->size = size;
sa_manager->domain = domain;
sa_manager->align = align;
sa_manager->hole = &sa_manager->olist;
INIT_LIST_HEAD(&sa_manager->olist);
for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
INIT_LIST_HEAD(&sa_manager->flist[i]);
r = amdgpu_bo_create_kernel(adev, size, align, domain, &sa_manager->bo,
&sa_manager->gpu_addr, &sa_manager->cpu_ptr);
r = amdgpu_bo_create_kernel(adev, size, AMDGPU_GPU_PAGE_SIZE, domain,
&sa_manager->bo, &sa_manager->gpu_addr,
&sa_manager->cpu_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
}
memset(sa_manager->cpu_ptr, 0, sa_manager->size);
memset(sa_manager->cpu_ptr, 0, size);
drm_suballoc_manager_init(&sa_manager->base, size, suballoc_align);
return r;
}
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager)
{
struct amdgpu_sa_bo *sa_bo, *tmp;
if (sa_manager->bo == NULL) {
dev_err(adev->dev, "no bo for sa manager\n");
return;
}
if (!list_empty(&sa_manager->olist)) {
sa_manager->hole = &sa_manager->olist,
amdgpu_sa_bo_try_free(sa_manager);
if (!list_empty(&sa_manager->olist)) {
dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
}
}
list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
amdgpu_sa_bo_remove_locked(sa_bo);
}
drm_suballoc_manager_fini(&sa_manager->base);
amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
sa_manager->size = 0;
}
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
if (sa_manager->hole == &sa_bo->olist) {
sa_manager->hole = sa_bo->olist.prev;
}
list_del_init(&sa_bo->olist);
list_del_init(&sa_bo->flist);
dma_fence_put(sa_bo->fence);
kfree(sa_bo);
}
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
struct amdgpu_sa_bo *sa_bo, *tmp;
if (sa_manager->hole->next == &sa_manager->olist)
return;
sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
if (sa_bo->fence == NULL ||
!dma_fence_is_signaled(sa_bo->fence)) {
return;
}
amdgpu_sa_bo_remove_locked(sa_bo);
}
}
static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
struct list_head *hole = sa_manager->hole;
if (hole != &sa_manager->olist) {
return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
}
return 0;
}
static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
struct list_head *hole = sa_manager->hole;
if (hole->next != &sa_manager->olist) {
return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
}
return sa_manager->size;
}
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
struct amdgpu_sa_bo *sa_bo,
unsigned size, unsigned align)
{
unsigned soffset, eoffset, wasted;
soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
wasted = (align - (soffset % align)) % align;
if ((eoffset - soffset) >= (size + wasted)) {
soffset += wasted;
sa_bo->manager = sa_manager;
sa_bo->soffset = soffset;
sa_bo->eoffset = soffset + size;
list_add(&sa_bo->olist, sa_manager->hole);
INIT_LIST_HEAD(&sa_bo->flist);
sa_manager->hole = &sa_bo->olist;
return true;
}
return false;
}
/**
* amdgpu_sa_event - Check if we can stop waiting
*
* @sa_manager: pointer to the sa_manager
* @size: number of bytes we want to allocate
* @align: alignment we need to match
*
* Check if either there is a fence we can wait for or
* enough free memory to satisfy the allocation directly
*/
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
unsigned size, unsigned align)
{
unsigned soffset, eoffset, wasted;
int i;
for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
if (!list_empty(&sa_manager->flist[i]))
return true;
soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
wasted = (align - (soffset % align)) % align;
if ((eoffset - soffset) >= (size + wasted)) {
return true;
}
return false;
}
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
struct dma_fence **fences,
unsigned *tries)
{
struct amdgpu_sa_bo *best_bo = NULL;
unsigned i, soffset, best, tmp;
/* if hole points to the end of the buffer */
if (sa_manager->hole->next == &sa_manager->olist) {
/* try again with its beginning */
sa_manager->hole = &sa_manager->olist;
return true;
}
soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
/* to handle wrap around we add sa_manager->size */
best = sa_manager->size * 2;
/* go over all fence list and try to find the closest sa_bo
* of the current last
*/
for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
struct amdgpu_sa_bo *sa_bo;
fences[i] = NULL;
if (list_empty(&sa_manager->flist[i]))
continue;
sa_bo = list_first_entry(&sa_manager->flist[i],
struct amdgpu_sa_bo, flist);
if (!dma_fence_is_signaled(sa_bo->fence)) {
fences[i] = sa_bo->fence;
continue;
}
/* limit the number of tries each ring gets */
if (tries[i] > 2) {
continue;
}
tmp = sa_bo->soffset;
if (tmp < soffset) {
/* wrap around, pretend it's after */
tmp += sa_manager->size;
}
tmp -= soffset;
if (tmp < best) {
/* this sa bo is the closest one */
best = tmp;
best_bo = sa_bo;
}
}
if (best_bo) {
uint32_t idx = best_bo->fence->context;
idx %= AMDGPU_SA_NUM_FENCE_LISTS;
++tries[idx];
sa_manager->hole = best_bo->olist.prev;
/* we knew that this one is signaled,
so it's save to remote it */
amdgpu_sa_bo_remove_locked(best_bo);
return true;
}
return false;
}
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align)
struct drm_suballoc **sa_bo,
unsigned int size)
{
struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
unsigned count;
int i, r;
signed long t;
struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
GFP_KERNEL, true, 0);
if (WARN_ON_ONCE(align > sa_manager->align))
return -EINVAL;
if (WARN_ON_ONCE(size > sa_manager->size))
return -EINVAL;
if (IS_ERR(sa)) {
*sa_bo = NULL;
return PTR_ERR(sa);
}
*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
if (!(*sa_bo))
return -ENOMEM;
(*sa_bo)->manager = sa_manager;
(*sa_bo)->fence = NULL;
INIT_LIST_HEAD(&(*sa_bo)->olist);
INIT_LIST_HEAD(&(*sa_bo)->flist);
spin_lock(&sa_manager->wq.lock);
do {
for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
tries[i] = 0;
do {
amdgpu_sa_bo_try_free(sa_manager);
if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
size, align)) {
spin_unlock(&sa_manager->wq.lock);
return 0;
}
/* see if we can skip over some allocations */
} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
if (fences[i])
fences[count++] = dma_fence_get(fences[i]);
if (count) {
spin_unlock(&sa_manager->wq.lock);
t = dma_fence_wait_any_timeout(fences, count, false,
MAX_SCHEDULE_TIMEOUT,
NULL);
for (i = 0; i < count; ++i)
dma_fence_put(fences[i]);
r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock);
} else {
/* if we have nothing to wait for block */
r = wait_event_interruptible_locked(
sa_manager->wq,
amdgpu_sa_event(sa_manager, size, align)
);
}
} while (!r);
spin_unlock(&sa_manager->wq.lock);
kfree(*sa_bo);
*sa_bo = NULL;
return r;
*sa_bo = sa;
return 0;
}
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct drm_suballoc **sa_bo,
struct dma_fence *fence)
{
struct amdgpu_sa_manager *sa_manager;
if (sa_bo == NULL || *sa_bo == NULL) {
return;
}
sa_manager = (*sa_bo)->manager;
spin_lock(&sa_manager->wq.lock);
if (fence && !dma_fence_is_signaled(fence)) {
uint32_t idx;
(*sa_bo)->fence = dma_fence_get(fence);
idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
} else {
amdgpu_sa_bo_remove_locked(*sa_bo);
}
wake_up_all_locked(&sa_manager->wq);
spin_unlock(&sa_manager->wq.lock);
drm_suballoc_free(*sa_bo, fence);
*sa_bo = NULL;
}
@@ -373,26 +109,8 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m)
{
struct amdgpu_sa_bo *i;
struct drm_printer p = drm_seq_file_printer(m);
spin_lock(&sa_manager->wq.lock);
list_for_each_entry(i, &sa_manager->olist, olist) {
uint64_t soffset = i->soffset + sa_manager->gpu_addr;
uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
if (&i->olist == sa_manager->hole) {
seq_printf(m, ">");
} else {
seq_printf(m, " ");
}
seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
soffset, eoffset, eoffset - soffset);
if (i->fence)
seq_printf(m, " protected by 0x%016llx on context %llu",
i->fence->seqno, i->fence->context);
seq_printf(m, "\n");
}
spin_unlock(&sa_manager->wq.lock);
drm_suballoc_dump_debug_info(&sa_manager->base, &p, sa_manager->gpu_addr);
}
#endif
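
With this conversion, all of the hole-tracking and per-ring fence-list bookkeeping removed above lives in the shared drm_suballoc helper; the amdgpu entry points are reduced to thin shims around drm_suballoc_new()/drm_suballoc_free() and the common debug-info dump.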


@@ -466,11 +466,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
}
/* Can't move a pinned BO */
abo = ttm_to_amdgpu_bo(bo);
if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
return -EINVAL;
adev = amdgpu_ttm_adev(bo->bdev);
if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&


@@ -649,7 +649,7 @@ static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
struct drm_device *drm = dev_get_drvdata(dev);
struct malidp_drm *malidp = drm_to_malidp(drm);
return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id);
return sysfs_emit(buf, "%08x\n", malidp->core_id);
}
static DEVICE_ATTR_RO(core_id);


@@ -9,7 +9,7 @@
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
u8 i = 0, j = 0;
/*
@@ -125,7 +125,7 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower)
u8 bDPTX = 0;
u8 bDPExecute = 1;
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
// S3 come back, need more time to wait BMC ready.
if (bPower)
WaitCount = 300;
@@ -172,7 +172,7 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower)
void ast_dp_power_on_off(struct drm_device *dev, bool on)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
// Read and Turn off DP PHY sleep
u8 bE3 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, AST_DP_VIDEO_ENABLE);
@@ -188,7 +188,7 @@ void ast_dp_power_on_off(struct drm_device *dev, bool on)
void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
u8 video_on_off = on;
// Video On/Off
@@ -208,7 +208,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode)
{
struct ast_private *ast = to_ast_private(crtc->dev);
struct ast_device *ast = to_ast_device(crtc->dev);
u32 ulRefreshRateIndex;
u8 ModeIdx;


@@ -10,7 +10,7 @@ MODULE_FIRMWARE("ast_dp501_fw.bin");
static void ast_release_firmware(void *data)
{
struct ast_private *ast = data;
struct ast_device *ast = data;
release_firmware(ast->dp501_fw);
ast->dp501_fw = NULL;
@@ -18,7 +18,7 @@ static void ast_release_firmware(void *data)
static int ast_load_dp501_microcode(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
int ret;
ret = request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev);
@@ -28,7 +28,7 @@ static int ast_load_dp501_microcode(struct drm_device *dev)
return devm_add_action_or_reset(dev->dev, ast_release_firmware, ast);
}
static void send_ack(struct ast_private *ast)
static void send_ack(struct ast_device *ast)
{
u8 sendack;
sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
@@ -36,7 +36,7 @@ static void send_ack(struct ast_private *ast)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
}
static void send_nack(struct ast_private *ast)
static void send_nack(struct ast_device *ast)
{
u8 sendack;
sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
@@ -44,7 +44,7 @@ static void send_nack(struct ast_private *ast)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
}
static bool wait_ack(struct ast_private *ast)
static bool wait_ack(struct ast_device *ast)
{
u8 waitack;
u32 retry = 0;
@@ -60,7 +60,7 @@ static bool wait_ack(struct ast_private *ast)
return false;
}
static bool wait_nack(struct ast_private *ast)
static bool wait_nack(struct ast_device *ast)
{
u8 waitack;
u32 retry = 0;
@@ -76,18 +76,18 @@ static bool wait_nack(struct ast_private *ast)
return false;
}
static void set_cmd_trigger(struct ast_private *ast)
static void set_cmd_trigger(struct ast_device *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x40);
}
static void clear_cmd_trigger(struct ast_private *ast)
static void clear_cmd_trigger(struct ast_device *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x00);
}
#if 0
static bool wait_fw_ready(struct ast_private *ast)
static bool wait_fw_ready(struct ast_device *ast)
{
u8 waitready;
u32 retry = 0;
@@ -106,7 +106,7 @@ static bool wait_fw_ready(struct ast_private *ast)
static bool ast_write_cmd(struct drm_device *dev, u8 data)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
int retry = 0;
if (wait_nack(ast)) {
send_nack(ast);
@@ -128,7 +128,7 @@ static bool ast_write_cmd(struct drm_device *dev, u8 data)
static bool ast_write_data(struct drm_device *dev, u8 data)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
if (wait_nack(ast)) {
send_nack(ast);
@@ -146,7 +146,7 @@ static bool ast_write_data(struct drm_device *dev, u8 data)
#if 0
static bool ast_read_data(struct drm_device *dev, u8 *data)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
u8 tmp;
*data = 0;
@@ -163,7 +163,7 @@ static bool ast_read_data(struct drm_device *dev, u8 *data)
return true;
}
static void clear_cmd(struct ast_private *ast)
static void clear_cmd(struct ast_device *ast)
{
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, 0x00);
@@ -178,14 +178,14 @@ void ast_set_dp501_video_output(struct drm_device *dev, u8 mode)
msleep(10);
}
static u32 get_fw_base(struct ast_private *ast)
static u32 get_fw_base(struct ast_device *ast)
{
return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff;
}
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
u32 i, data;
u32 boot_address;
@@ -204,7 +204,7 @@ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
static bool ast_launch_m68k(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
u32 i, data, len = 0;
u32 boot_address;
u8 *fw_addr = NULL;
@@ -274,7 +274,7 @@ static bool ast_launch_m68k(struct drm_device *dev)
bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
u32 i, boot_address, offset, data;
u32 *pEDIDidx;
@@ -334,7 +334,7 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
static bool ast_init_dvo(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
u8 jreg;
u32 data;
ast_write32(ast, 0xf004, 0x1e6e0000);
@@ -407,7 +407,7 @@ static bool ast_init_dvo(struct drm_device *dev)
static void ast_init_analog(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
u32 data;
/*
@@ -434,7 +434,7 @@ static void ast_init_analog(struct drm_device *dev)
void ast_init_3rdtx(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
u8 jreg;
if (ast->chip == AST2300 || ast->chip == AST2400) {


@@ -105,7 +105,7 @@ static int ast_remove_conflicting_framebuffers(struct pci_dev *pdev)
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct ast_private *ast;
struct ast_device *ast;
struct drm_device *dev;
int ret;


@@ -157,7 +157,7 @@ to_ast_sil164_connector(struct drm_connector *connector)
* Device
*/
struct ast_private {
struct ast_device {
struct drm_device base;
struct mutex ioregs_lock; /* Protects access to I/O registers in ioregs */
@@ -210,14 +210,14 @@ struct ast_private {
const struct firmware *dp501_fw; /* dp501 fw */
};
static inline struct ast_private *to_ast_private(struct drm_device *dev)
static inline struct ast_device *to_ast_device(struct drm_device *dev)
{
return container_of(dev, struct ast_private, base);
return container_of(dev, struct ast_device, base);
}
struct ast_private *ast_device_create(const struct drm_driver *drv,
struct pci_dev *pdev,
unsigned long flags);
struct ast_device *ast_device_create(const struct drm_driver *drv,
struct pci_dev *pdev,
unsigned long flags);
#define AST_IO_AR_PORT_WRITE (0x40)
#define AST_IO_MISC_PORT_WRITE (0x42)
@@ -238,62 +238,44 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
#define __ast_read(x) \
static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
u##x val = 0;\
val = ioread##x(ast->regs + reg); \
return val;\
static inline u32 ast_read32(struct ast_device *ast, u32 reg)
{
return ioread32(ast->regs + reg);
}
__ast_read(8);
__ast_read(16);
__ast_read(32)
#define __ast_io_read(x) \
static inline u##x ast_io_read##x(struct ast_private *ast, u32 reg) { \
u##x val = 0;\
val = ioread##x(ast->ioregs + reg); \
return val;\
static inline void ast_write32(struct ast_device *ast, u32 reg, u32 val)
{
iowrite32(val, ast->regs + reg);
}
__ast_io_read(8);
__ast_io_read(16);
__ast_io_read(32);
static inline u8 ast_io_read8(struct ast_device *ast, u32 reg)
{
return ioread8(ast->ioregs + reg);
}
#define __ast_write(x) \
static inline void ast_write##x(struct ast_private *ast, u32 reg, u##x val) {\
iowrite##x(val, ast->regs + reg);\
}
static inline void ast_io_write8(struct ast_device *ast, u32 reg, u8 val)
{
iowrite8(val, ast->ioregs + reg);
}
__ast_write(8);
__ast_write(16);
__ast_write(32);
#define __ast_io_write(x) \
static inline void ast_io_write##x(struct ast_private *ast, u32 reg, u##x val) {\
iowrite##x(val, ast->ioregs + reg);\
}
__ast_io_write(8);
__ast_io_write(16);
#undef __ast_io_write
static inline void ast_set_index_reg(struct ast_private *ast,
static inline void ast_set_index_reg(struct ast_device *ast,
uint32_t base, uint8_t index,
uint8_t val)
{
ast_io_write16(ast, base, ((u16)val << 8) | index);
ast_io_write8(ast, base, index);
++base;
ast_io_write8(ast, base, val);
}
void ast_set_index_reg_mask(struct ast_private *ast,
void ast_set_index_reg_mask(struct ast_device *ast,
uint32_t base, uint8_t index,
uint8_t mask, uint8_t val);
uint8_t ast_get_index_reg(struct ast_private *ast,
uint8_t ast_get_index_reg(struct ast_device *ast,
uint32_t base, uint8_t index);
uint8_t ast_get_index_reg_mask(struct ast_private *ast,
uint8_t ast_get_index_reg_mask(struct ast_device *ast,
uint32_t base, uint8_t index, uint8_t mask);
static inline void ast_open_key(struct ast_private *ast)
static inline void ast_open_key(struct ast_device *ast)
{
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
}
@@ -352,7 +334,7 @@ struct ast_crtc_state {
#define to_ast_crtc_state(state) container_of(state, struct ast_crtc_state, base)
int ast_mode_config_init(struct ast_private *ast);
int ast_mode_config_init(struct ast_device *ast);
#define AST_MM_ALIGN_SHIFT 4
#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
@@ -476,16 +458,16 @@ int ast_mode_config_init(struct ast_private *ast);
#define ASTDP_1366x768_60 0x1E
#define ASTDP_1152x864_75 0x1F
int ast_mm_init(struct ast_private *ast);
int ast_mm_init(struct ast_device *ast);
/* ast post */
void ast_enable_vga(struct drm_device *dev);
void ast_enable_mmio(struct drm_device *dev);
bool ast_is_vga_enabled(struct drm_device *dev);
void ast_post_gpu(struct drm_device *dev);
u32 ast_mindwm(struct ast_private *ast, u32 r);
void ast_moutdwm(struct ast_private *ast, u32 r, u32 v);
void ast_patch_ahb_2500(struct ast_private *ast);
u32 ast_mindwm(struct ast_device *ast, u32 r);
void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
void ast_patch_ahb_2500(struct ast_device *ast);
/* ast dp501 */
void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);


@@ -29,7 +29,7 @@
static void ast_i2c_setsda(void *i2c_priv, int data)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_private *ast = to_ast_private(i2c->dev);
struct ast_device *ast = to_ast_device(i2c->dev);
int i;
u8 ujcrb7, jtemp;
@@ -45,7 +45,7 @@ static void ast_i2c_setsda(void *i2c_priv, int data)
static void ast_i2c_setscl(void *i2c_priv, int clock)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_private *ast = to_ast_private(i2c->dev);
struct ast_device *ast = to_ast_device(i2c->dev);
int i;
u8 ujcrb7, jtemp;
@@ -61,7 +61,7 @@ static void ast_i2c_setscl(void *i2c_priv, int clock)
static int ast_i2c_getsda(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_private *ast = to_ast_private(i2c->dev);
struct ast_device *ast = to_ast_device(i2c->dev);
uint32_t val, val2, count, pass;
count = 0;
@@ -83,7 +83,7 @@ static int ast_i2c_getsda(void *i2c_priv)
static int ast_i2c_getscl(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_private *ast = to_ast_private(i2c->dev);
struct ast_device *ast = to_ast_device(i2c->dev);
uint32_t val, val2, count, pass;
count = 0;


@@ -35,7 +35,7 @@
#include "ast_drv.h"
void ast_set_index_reg_mask(struct ast_private *ast,
void ast_set_index_reg_mask(struct ast_device *ast,
uint32_t base, uint8_t index,
uint8_t mask, uint8_t val)
{
@@ -45,7 +45,7 @@ void ast_set_index_reg_mask(struct ast_private *ast,
ast_set_index_reg(ast, base, index, tmp);
}
uint8_t ast_get_index_reg(struct ast_private *ast,
uint8_t ast_get_index_reg(struct ast_device *ast,
uint32_t base, uint8_t index)
{
uint8_t ret;
@@ -54,7 +54,7 @@ uint8_t ast_get_index_reg(struct ast_private *ast,
return ret;
}
uint8_t ast_get_index_reg_mask(struct ast_private *ast,
uint8_t ast_get_index_reg_mask(struct ast_device *ast,
uint32_t base, uint8_t index, uint8_t mask)
{
uint8_t ret;
@@ -66,7 +66,7 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
{
struct device_node *np = dev->dev->of_node;
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t data, jregd0, jregd1;
@@ -122,7 +122,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
static int ast_detect_chip(struct drm_device *dev, bool *need_post)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t jreg, scu_rev;
@@ -271,7 +271,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
static int ast_get_dram_info(struct drm_device *dev)
{
struct device_node *np = dev->dev->of_node;
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
uint32_t denum, num, div, ref_pll, dsel;
@@ -394,22 +394,22 @@ static int ast_get_dram_info(struct drm_device *dev)
*/
static void ast_device_release(void *data)
{
struct ast_private *ast = data;
struct ast_device *ast = data;
/* enable standard VGA decode */
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
}
struct ast_private *ast_device_create(const struct drm_driver *drv,
struct pci_dev *pdev,
unsigned long flags)
struct ast_device *ast_device_create(const struct drm_driver *drv,
struct pci_dev *pdev,
unsigned long flags)
{
struct drm_device *dev;
struct ast_private *ast;
struct ast_device *ast;
bool need_post;
int ret = 0;
ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_private, base);
ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
if (IS_ERR(ast))
return ast;
dev = &ast->base;


@@ -33,7 +33,7 @@
#include "ast_drv.h"
static u32 ast_get_vram_size(struct ast_private *ast)
static u32 ast_get_vram_size(struct ast_device *ast)
{
u8 jreg;
u32 vram_size;
@@ -73,7 +73,7 @@ static u32 ast_get_vram_size(struct ast_private *ast)
return vram_size;
}
int ast_mm_init(struct ast_private *ast)
int ast_mm_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);


@@ -51,7 +51,7 @@
#define AST_LUT_SIZE 256
static inline void ast_load_palette_index(struct ast_private *ast,
static inline void ast_load_palette_index(struct ast_device *ast,
u8 index, u8 red, u8 green,
u8 blue)
{
@@ -65,7 +65,7 @@ static inline void ast_load_palette_index(struct ast_private *ast,
ast_io_read8(ast, AST_IO_SEQ_PORT);
}
static void ast_crtc_set_gamma_linear(struct ast_private *ast,
static void ast_crtc_set_gamma_linear(struct ast_device *ast,
const struct drm_format_info *format)
{
int i;
@@ -84,7 +84,7 @@ static void ast_crtc_set_gamma_linear(struct ast_private *ast,
}
}
static void ast_crtc_set_gamma(struct ast_private *ast,
static void ast_crtc_set_gamma(struct ast_device *ast,
const struct drm_format_info *format,
struct drm_color_lut *lut)
{
@@ -232,7 +232,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
return true;
}
static void ast_set_vbios_color_reg(struct ast_private *ast,
static void ast_set_vbios_color_reg(struct ast_device *ast,
const struct drm_format_info *format,
const struct ast_vbios_mode_info *vbios_mode)
{
@@ -263,7 +263,7 @@ static void ast_set_vbios_color_reg(struct ast_private *ast,
}
}
static void ast_set_vbios_mode_reg(struct ast_private *ast,
static void ast_set_vbios_mode_reg(struct ast_device *ast,
const struct drm_display_mode *adjusted_mode,
const struct ast_vbios_mode_info *vbios_mode)
{
@@ -287,7 +287,7 @@ static void ast_set_vbios_mode_reg(struct ast_private *ast,
}
}
static void ast_set_std_reg(struct ast_private *ast,
static void ast_set_std_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
@@ -335,7 +335,7 @@ static void ast_set_std_reg(struct ast_private *ast,
ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
}
static void ast_set_crtc_reg(struct ast_private *ast,
static void ast_set_crtc_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
@@ -450,7 +450,7 @@ static void ast_set_crtc_reg(struct ast_private *ast,
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
}
static void ast_set_offset_reg(struct ast_private *ast,
static void ast_set_offset_reg(struct ast_device *ast,
struct drm_framebuffer *fb)
{
u16 offset;
@@ -460,7 +460,7 @@ static void ast_set_offset_reg(struct ast_private *ast,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
}
static void ast_set_dclk_reg(struct ast_private *ast,
static void ast_set_dclk_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
@@ -478,7 +478,7 @@ static void ast_set_dclk_reg(struct ast_private *ast,
((clk_info->param3 & 0x3) << 4));
}
static void ast_set_color_reg(struct ast_private *ast,
static void ast_set_color_reg(struct ast_device *ast,
const struct drm_format_info *format)
{
u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
@@ -507,7 +507,7 @@ static void ast_set_color_reg(struct ast_private *ast,
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
}
static void ast_set_crtthd_reg(struct ast_private *ast)
static void ast_set_crtthd_reg(struct ast_device *ast)
{
/* Set Threshold */
if (ast->chip == AST2600) {
@@ -529,7 +529,7 @@ static void ast_set_crtthd_reg(struct ast_private *ast)
}
}
static void ast_set_sync_reg(struct ast_private *ast,
static void ast_set_sync_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
@@ -544,7 +544,7 @@ static void ast_set_sync_reg(struct ast_private *ast,
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
}
static void ast_set_start_address_crt1(struct ast_private *ast,
static void ast_set_start_address_crt1(struct ast_device *ast,
unsigned int offset)
{
u32 addr;
@@ -556,7 +556,7 @@ static void ast_set_start_address_crt1(struct ast_private *ast,
}
static void ast_wait_for_vretrace(struct ast_private *ast)
static void ast_wait_for_vretrace(struct ast_device *ast)
{
unsigned long timeout = jiffies + HZ;
u8 vgair1;
@@ -645,7 +645,7 @@ static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_device *dev = plane->dev;
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
@@ -672,23 +672,34 @@ static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
/*
* Some BMCs stop scanning out the video signal after the driver
* reprogrammed the offset or scanout address. This stalls display
* output for several seconds and makes the display unusable.
* Therefore only update the offset if it changes and reprogram the
* address after enabling the plane.
* reprogrammed the offset. This stalls display output for several
* seconds and makes the display unusable. Therefore only update
* the offset if it changes.
*/
if (!old_fb || old_fb->pitches[0] != fb->pitches[0])
ast_set_offset_reg(ast, fb);
if (!old_fb) {
ast_set_start_address_crt1(ast, (u32)ast_plane->offset);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
}
}
static void ast_primary_plane_helper_atomic_enable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ast_device *ast = to_ast_device(plane->dev);
struct ast_plane *ast_plane = to_ast_plane(plane);
/*
* Some BMCs stop scanning out the video signal after the driver
* reprogrammed the scanout address. This stalls display
* output for several seconds and makes the display unusable.
* Therefore only reprogram the address after enabling the plane.
*/
ast_set_start_address_crt1(ast, (u32)ast_plane->offset);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
}
static void ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ast_private *ast = to_ast_private(plane->dev);
struct ast_device *ast = to_ast_device(plane->dev);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
}
@@ -697,6 +708,7 @@ static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = ast_primary_plane_helper_atomic_check,
.atomic_update = ast_primary_plane_helper_atomic_update,
.atomic_enable = ast_primary_plane_helper_atomic_enable,
.atomic_disable = ast_primary_plane_helper_atomic_disable,
};
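
This hunk wires up the new atomic enable_plane callback from the changelog above: reprogramming the scanout start address moves out of atomic_update into atomic_enable, so it happens only when the plane is actually (re-)enabled, preserving the BMC workaround described in the comments.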
@@ -707,7 +719,7 @@ static const struct drm_plane_funcs ast_primary_plane_funcs = {
DRM_GEM_SHADOW_PLANE_FUNCS,
};
static int ast_primary_plane_init(struct ast_private *ast)
static int ast_primary_plane_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct ast_plane *ast_primary_plane = &ast->primary_plane;
@@ -800,7 +812,7 @@ static void ast_update_cursor_image(u8 __iomem *dst, const u8 *src, int width, i
writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
}
static void ast_set_cursor_base(struct ast_private *ast, u64 address)
static void ast_set_cursor_base(struct ast_device *ast, u64 address)
{
u8 addr0 = (address >> 3) & 0xff;
u8 addr1 = (address >> 11) & 0xff;
@@ -811,7 +823,7 @@ static void ast_set_cursor_base(struct ast_private *ast, u64 address)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
}
static void ast_set_cursor_location(struct ast_private *ast, u16 x, u16 y,
static void ast_set_cursor_location(struct ast_device *ast, u16 x, u16 y,
u8 x_offset, u8 y_offset)
{
u8 x0 = (x & 0x00ff);
@@ -827,7 +839,7 @@ static void ast_set_cursor_location(struct ast_private *ast, u16 x, u16 y,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1);
}
static void ast_set_cursor_enabled(struct ast_private *ast, bool enabled)
static void ast_set_cursor_enabled(struct ast_device *ast, bool enabled)
{
static const u8 mask = (u8)~(AST_IO_VGACRCB_HWC_16BPP |
AST_IO_VGACRCB_HWC_ENABLED);
@@ -876,7 +888,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct ast_private *ast = to_ast_private(plane->dev);
struct ast_device *ast = to_ast_device(plane->dev);
struct iosys_map src_map = shadow_plane_state->data[0];
struct drm_rect damage;
const u8 *src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
@@ -931,7 +943,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
static void ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ast_private *ast = to_ast_private(plane->dev);
struct ast_device *ast = to_ast_device(plane->dev);
ast_set_cursor_enabled(ast, false);
}
@@ -950,7 +962,7 @@ static const struct drm_plane_funcs ast_cursor_plane_funcs = {
DRM_GEM_SHADOW_PLANE_FUNCS,
};
static int ast_cursor_plane_init(struct ast_private *ast)
static int ast_cursor_plane_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct ast_plane *ast_cursor_plane = &ast->cursor_plane;
@@ -995,7 +1007,7 @@ static int ast_cursor_plane_init(struct ast_private *ast)
static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct ast_private *ast = to_ast_private(crtc->dev);
struct ast_device *ast = to_ast_device(crtc->dev);
u8 ch = AST_DPMS_VSYNC_OFF | AST_DPMS_HSYNC_OFF;
struct ast_crtc_state *ast_state;
const struct drm_format_info *format;
@@ -1052,7 +1064,7 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
static enum drm_mode_status
ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
struct ast_private *ast = to_ast_private(crtc->dev);
struct ast_device *ast = to_ast_device(crtc->dev);
enum drm_mode_status status;
uint32_t jtemp;
@@ -1177,7 +1189,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
@@ -1202,7 +1214,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
static void ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info =
@@ -1224,7 +1236,7 @@ static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_ato
{
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
@@ -1312,7 +1324,7 @@ static const struct drm_crtc_funcs ast_crtc_funcs = {
static int ast_crtc_init(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
struct drm_crtc *crtc = &ast->crtc;
int ret;
@@ -1338,7 +1350,7 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
{
struct ast_vga_connector *ast_vga_connector = to_ast_vga_connector(connector);
struct drm_device *dev = connector->dev;
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
struct edid *edid;
int count;
@@ -1411,7 +1423,7 @@ static int ast_vga_connector_init(struct drm_device *dev,
return 0;
}
static int ast_vga_output_init(struct ast_private *ast)
static int ast_vga_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
@@ -1444,7 +1456,7 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector
{
struct ast_sil164_connector *ast_sil164_connector = to_ast_sil164_connector(connector);
struct drm_device *dev = connector->dev;
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
struct edid *edid;
int count;
@@ -1517,7 +1529,7 @@ static int ast_sil164_connector_init(struct drm_device *dev,
return 0;
}
static int ast_sil164_output_init(struct ast_private *ast)
static int ast_sil164_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
@@ -1604,7 +1616,7 @@ static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector
return 0;
}
static int ast_dp501_output_init(struct ast_private *ast)
static int ast_dp501_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
@@ -1691,7 +1703,7 @@ static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector
return 0;
}
static int ast_astdp_output_init(struct ast_private *ast)
static int ast_astdp_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
@@ -1721,7 +1733,7 @@ static int ast_astdp_output_init(struct ast_private *ast)
static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *state)
{
struct ast_private *ast = to_ast_private(state->dev);
struct ast_device *ast = to_ast_device(state->dev);
/*
* Concurrent operations could possibly trigger a call to
@@ -1742,7 +1754,7 @@ static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
unsigned long fbsize, fbpages, max_fbpages;
max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT;
@@ -1763,7 +1775,7 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
int ast_mode_config_init(struct ast_private *ast)
int ast_mode_config_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
int ret;


@@ -39,7 +39,7 @@ static void ast_post_chip_2500(struct drm_device *dev);
void ast_enable_vga(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01);
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01);
@@ -47,7 +47,7 @@ void ast_enable_vga(struct drm_device *dev)
void ast_enable_mmio(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
}
@@ -55,7 +55,7 @@ void ast_enable_mmio(struct drm_device *dev)
bool ast_is_vga_enabled(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
u8 ch;
ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
@@ -70,7 +70,7 @@ static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
static void
ast_set_def_ext_reg(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 i, index, reg;
const u8 *ext_reg_info;
@@ -110,7 +110,7 @@ ast_set_def_ext_reg(struct drm_device *dev)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
}
u32 ast_mindwm(struct ast_private *ast, u32 r)
u32 ast_mindwm(struct ast_device *ast, u32 r)
{
uint32_t data;
@@ -123,7 +123,7 @@ u32 ast_mindwm(struct ast_private *ast, u32 r)
return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
}
void ast_moutdwm(struct ast_private *ast, u32 r, u32 v)
void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
{
uint32_t data;
ast_write32(ast, 0xf004, r & 0xffff0000);
@@ -162,7 +162,7 @@ static const u32 pattern_AST2150[14] = {
0x20F050E0
};
static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen)
{
u32 data, timeout;
@@ -192,7 +192,7 @@ static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
}
#if 0 /* unused in DDX driver - here for completeness */
static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
static u32 mmctestsingle2_ast2150(struct ast_device *ast, u32 datagen)
{
u32 data, timeout;
@@ -212,7 +212,7 @@ static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
}
#endif
static int cbrtest_ast2150(struct ast_private *ast)
static int cbrtest_ast2150(struct ast_device *ast)
{
int i;
@@ -222,7 +222,7 @@ static int cbrtest_ast2150(struct ast_private *ast)
return 1;
}
static int cbrscan_ast2150(struct ast_private *ast, int busw)
static int cbrscan_ast2150(struct ast_device *ast, int busw)
{
u32 patcnt, loop;
@@ -239,7 +239,7 @@ static int cbrscan_ast2150(struct ast_private *ast, int busw)
}
static void cbrdlli_ast2150(struct ast_private *ast, int busw)
static void cbrdlli_ast2150(struct ast_device *ast, int busw)
{
u32 dll_min[4], dll_max[4], dlli, data, passcnt;
@@ -273,7 +273,7 @@ static void cbrdlli_ast2150(struct ast_private *ast, int busw)
static void ast_init_dram_reg(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
u8 j;
u32 data, temp, i;
const struct ast_dramstruct *dram_reg_info;
@@ -366,7 +366,7 @@ static void ast_init_dram_reg(struct drm_device *dev)
void ast_post_gpu(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 reg;
@@ -449,7 +449,7 @@ static const u32 pattern[8] = {
0x7C61D253
};
static bool mmc_test(struct ast_private *ast, u32 datagen, u8 test_ctl)
static bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
{
u32 data, timeout;
@@ -469,7 +469,7 @@ static bool mmc_test(struct ast_private *ast, u32 datagen, u8 test_ctl)
return true;
}
static u32 mmc_test2(struct ast_private *ast, u32 datagen, u8 test_ctl)
static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl)
{
u32 data, timeout;
@@ -490,32 +490,32 @@ static u32 mmc_test2(struct ast_private *ast, u32 datagen, u8 test_ctl)
}
static bool mmc_test_burst(struct ast_private *ast, u32 datagen)
static bool mmc_test_burst(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0xc1);
}
static u32 mmc_test_burst2(struct ast_private *ast, u32 datagen)
static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen)
{
return mmc_test2(ast, datagen, 0x41);
}
static bool mmc_test_single(struct ast_private *ast, u32 datagen)
static bool mmc_test_single(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0xc5);
}
static u32 mmc_test_single2(struct ast_private *ast, u32 datagen)
static u32 mmc_test_single2(struct ast_device *ast, u32 datagen)
{
return mmc_test2(ast, datagen, 0x05);
}
static bool mmc_test_single_2500(struct ast_private *ast, u32 datagen)
static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0x85);
}
static int cbr_test(struct ast_private *ast)
static int cbr_test(struct ast_device *ast)
{
u32 data;
int i;
@@ -534,7 +534,7 @@ static int cbr_test(struct ast_private *ast)
return 1;
}
static int cbr_scan(struct ast_private *ast)
static int cbr_scan(struct ast_device *ast)
{
u32 data, data2, patcnt, loop;
@@ -555,7 +555,7 @@ static int cbr_scan(struct ast_private *ast)
return data2;
}
static u32 cbr_test2(struct ast_private *ast)
static u32 cbr_test2(struct ast_device *ast)
{
u32 data;
@@ -569,7 +569,7 @@ static u32 cbr_test2(struct ast_private *ast)
return ~data & 0xffff;
}
static u32 cbr_scan2(struct ast_private *ast)
static u32 cbr_scan2(struct ast_device *ast)
{
u32 data, data2, patcnt, loop;
@@ -590,7 +590,7 @@ static u32 cbr_scan2(struct ast_private *ast)
return data2;
}
static bool cbr_test3(struct ast_private *ast)
static bool cbr_test3(struct ast_device *ast)
{
if (!mmc_test_burst(ast, 0))
return false;
@@ -599,7 +599,7 @@ static bool cbr_test3(struct ast_private *ast)
return true;
}
static bool cbr_scan3(struct ast_private *ast)
static bool cbr_scan3(struct ast_device *ast)
{
u32 patcnt, loop;
@@ -615,7 +615,7 @@ static bool cbr_scan3(struct ast_private *ast)
return true;
}
static bool finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
bool status = false;
@@ -714,7 +714,7 @@ static bool finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *pa
return status;
} /* finetuneDQI_L */
static void finetuneDQSI(struct ast_private *ast)
static void finetuneDQSI(struct ast_device *ast)
{
u32 dlli, dqsip, dqidly;
u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
@@ -804,7 +804,7 @@ static void finetuneDQSI(struct ast_private *ast)
ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
}
static bool cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
bool status = false;
@@ -860,7 +860,7 @@ static bool cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
return status;
} /* CBRDLL2 */
static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param)
static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 trap, trap_AC2, trap_MRS;
@@ -1102,7 +1102,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
}
static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 data, data2, retry = 0;
@@ -1225,7 +1225,7 @@ static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
}
static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *param)
static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 trap, trap_AC2, trap_MRS;
@@ -1472,7 +1472,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
}
}
static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 data, data2, retry = 0;
@@ -1600,7 +1600,7 @@ static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
static void ast_post_chip_2300(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
struct ast2300_dram_param param;
u32 temp;
u8 reg;
@@ -1681,7 +1681,7 @@ static void ast_post_chip_2300(struct drm_device *dev)
} while ((reg & 0x40) == 0);
}
static bool cbr_test_2500(struct ast_private *ast)
static bool cbr_test_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
@@ -1692,7 +1692,7 @@ static bool cbr_test_2500(struct ast_private *ast)
return true;
}
static bool ddr_test_2500(struct ast_private *ast)
static bool ddr_test_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
@@ -1709,7 +1709,7 @@ static bool ddr_test_2500(struct ast_private *ast)
return true;
}
static void ddr_init_common_2500(struct ast_private *ast)
static void ddr_init_common_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
ast_moutdwm(ast, 0x1E6E0008, 0x2003000F);
@@ -1732,7 +1732,7 @@ static void ddr_init_common_2500(struct ast_private *ast)
ast_moutdwm(ast, 0x1E6E024C, 0x80808080);
}
static void ddr_phy_init_2500(struct ast_private *ast)
static void ddr_phy_init_2500(struct ast_device *ast)
{
u32 data, pass, timecnt;
@@ -1766,7 +1766,7 @@ static void ddr_phy_init_2500(struct ast_private *ast)
* 4Gb : 0x80000000 ~ 0x9FFFFFFF
* 8Gb : 0x80000000 ~ 0xBFFFFFFF
*/
static void check_dram_size_2500(struct ast_private *ast, u32 tRFC)
static void check_dram_size_2500(struct ast_device *ast, u32 tRFC)
{
u32 reg_04, reg_14;
@@ -1797,7 +1797,7 @@ static void check_dram_size_2500(struct ast_private *ast, u32 tRFC)
ast_moutdwm(ast, 0x1E6E0014, reg_14);
}
static void enable_cache_2500(struct ast_private *ast)
static void enable_cache_2500(struct ast_device *ast)
{
u32 reg_04, data;
@@ -1810,7 +1810,7 @@ static void enable_cache_2500(struct ast_private *ast)
ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400);
}
static void set_mpll_2500(struct ast_private *ast)
static void set_mpll_2500(struct ast_device *ast)
{
u32 addr, data, param;
@@ -1837,7 +1837,7 @@ static void set_mpll_2500(struct ast_private *ast)
udelay(100);
}
static void reset_mmc_2500(struct ast_private *ast)
static void reset_mmc_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E78505C, 0x00000004);
ast_moutdwm(ast, 0x1E785044, 0x00000001);
@@ -1848,7 +1848,7 @@ static void reset_mmc_2500(struct ast_private *ast)
ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
}
static void ddr3_init_2500(struct ast_private *ast, const u32 *ddr_table)
static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table)
{
ast_moutdwm(ast, 0x1E6E0004, 0x00000303);
@@ -1892,7 +1892,7 @@ static void ddr3_init_2500(struct ast_private *ast, const u32 *ddr_table)
ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
}
static void ddr4_init_2500(struct ast_private *ast, const u32 *ddr_table)
static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table)
{
u32 data, data2, pass, retrycnt;
u32 ddr_vref, phy_vref;
@@ -2002,7 +2002,7 @@ static void ddr4_init_2500(struct ast_private *ast, const u32 *ddr_table)
ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
}
static bool ast_dram_init_2500(struct ast_private *ast)
static bool ast_dram_init_2500(struct ast_device *ast)
{
u32 data;
u32 max_tries = 5;
@@ -2030,7 +2030,7 @@ static bool ast_dram_init_2500(struct ast_private *ast)
return true;
}
void ast_patch_ahb_2500(struct ast_private *ast)
void ast_patch_ahb_2500(struct ast_device *ast)
{
u32 data;
@@ -2066,7 +2066,7 @@ void ast_patch_ahb_2500(struct ast_private *ast)
void ast_post_chip_2500(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
struct ast_device *ast = to_ast_device(dev);
u32 temp;
u8 reg;


@@ -326,7 +326,7 @@ config DRM_TI_DLPC3433
input that produces a DMD output in RGB565, RGB666, RGB888
formats.
It supports upto 720p resolution with 60 and 120 Hz refresh
It supports up to 720p resolution with 60 and 120 Hz refresh
rates.
config DRM_TI_TFP410


@@ -81,6 +81,8 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
return ret;
}
drm_panel_bridge_set_orientation(connector, bridge);
drm_connector_attach_encoder(&panel_bridge->connector,
bridge->encoder);


@@ -229,6 +229,7 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
ctx->bridge.funcs = &tc358762_bridge_funcs;
ctx->bridge.type = DRM_MODE_CONNECTOR_DPI;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
drm_bridge_add(&ctx->bridge);


@@ -2702,6 +2702,11 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
funcs->atomic_disable(plane, old_state);
} else if (new_plane_state->crtc || disabling) {
funcs->atomic_update(plane, old_state);
if (!disabling && funcs->atomic_enable) {
if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
funcs->atomic_enable(plane, old_state);
}
}
}
@@ -2762,6 +2767,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
struct drm_plane_state *new_plane_state =
drm_atomic_get_new_plane_state(old_state, plane);
const struct drm_plane_helper_funcs *plane_funcs;
bool disabling;
plane_funcs = plane->helper_private;
@@ -2771,12 +2777,18 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
WARN_ON(new_plane_state->crtc &&
new_plane_state->crtc != crtc);
if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
plane_funcs->atomic_disable)
disabling = drm_atomic_plane_disabling(old_plane_state, new_plane_state);
if (disabling && plane_funcs->atomic_disable) {
plane_funcs->atomic_disable(plane, old_state);
else if (new_plane_state->crtc ||
drm_atomic_plane_disabling(old_plane_state, new_plane_state))
} else if (new_plane_state->crtc || disabling) {
plane_funcs->atomic_update(plane, old_state);
if (!disabling && plane_funcs->atomic_enable) {
if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
plane_funcs->atomic_enable(plane, old_state);
}
}
}
if (crtc_funcs && crtc_funcs->atomic_flush)


@@ -33,9 +33,11 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_utils.h>
#include <linux/fb.h>
#include <linux/property.h>
#include <linux/uaccess.h>
#include <video/cmdline.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -154,9 +156,10 @@ EXPORT_SYMBOL(drm_get_connector_type_name);
static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
{
struct drm_cmdline_mode *mode = &connector->cmdline_mode;
char *option = NULL;
const char *option;
if (fb_get_options(connector->name, &option))
option = video_get_options(connector->name);
if (!option)
return;
if (!drm_mode_parse_command_line_for_connector(option,
@@ -1446,6 +1449,20 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
* a firmware-handled hotkey. Therefore userspace must not include the
* privacy-screen sw-state in an atomic commit unless it wants to change
* its value.
*
* left margin, right margin, top margin, bottom margin:
* Add margins to the connector's viewport. This is typically used to
* mitigate underscan on TVs.
*
* The value is the size in pixels of the black border which will be
* added. The attached CRTC's content will be scaled to fill the whole
* area inside the margin.
*
* The margins configuration might be sent to the sink, e.g. via HDMI AVI
* InfoFrames.
*
* Drivers can set up these properties by calling
* drm_mode_create_tv_margin_properties().
*/
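/*
 * Illustrative sketch, not part of this patch: a driver wiring up the
 * margin properties documented above. my_driver_init_connector() is a
 * hypothetical wrapper; the two helpers are the existing DRM ones.
 */
static int my_driver_init_connector(struct drm_device *dev,
				    struct drm_connector *connector)
{
	int ret;

	/* Create the left/right/top/bottom margin properties once per device. */
	ret = drm_mode_create_tv_margin_properties(dev);
	if (ret)
		return ret;

	/* Attach them to this connector; the margins default to 0 pixels. */
	drm_connector_attach_tv_margin_properties(connector);

	return 0;
}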
int drm_connector_create_standard_properties(struct drm_device *dev)
@@ -1590,10 +1607,6 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property);
/*
* TODO: Document the properties:
* - left margin
* - right margin
* - top margin
* - bottom margin
* - brightness
* - contrast
* - flicker reduction
@@ -1602,7 +1615,6 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property);
* - overscan
* - saturation
* - select subconnector
* - subconnector
*/
/**
* DOC: Analog TV Connector Properties


@@ -7,13 +7,29 @@
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
static int validate_displayid(const u8 *displayid, int length, int idx)
static const struct displayid_header *
displayid_get_header(const u8 *displayid, int length, int index)
{
const struct displayid_header *base;
if (sizeof(*base) > length - index)
return ERR_PTR(-EINVAL);
base = (const struct displayid_header *)&displayid[index];
return base;
}
static const struct displayid_header *
validate_displayid(const u8 *displayid, int length, int idx)
{
int i, dispid_length;
u8 csum = 0;
const struct displayid_header *base;
base = (const struct displayid_header *)&displayid[idx];
base = displayid_get_header(displayid, length, idx);
if (IS_ERR(base))
return base;
DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
base->rev, base->bytes, base->prod_id, base->ext_count);
@@ -21,16 +37,16 @@ static int validate_displayid(const u8 *displayid, int length, int idx)
/* +1 for DispID checksum */
dispid_length = sizeof(*base) + base->bytes + 1;
if (dispid_length > length - idx)
return -EINVAL;
return ERR_PTR(-EINVAL);
for (i = 0; i < dispid_length; i++)
csum += displayid[idx + i];
if (csum) {
DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
return -EINVAL;
return ERR_PTR(-EINVAL);
}
return 0;
return base;
}
static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
@@ -39,7 +55,6 @@ static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
{
const u8 *displayid = drm_find_edid_extension(drm_edid, DISPLAYID_EXT, ext_index);
const struct displayid_header *base;
int ret;
if (!displayid)
return NULL;
@@ -48,11 +63,10 @@ static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
*length = EDID_LENGTH - 1;
*idx = 1;
ret = validate_displayid(displayid, *length, *idx);
if (ret)
base = validate_displayid(displayid, *length, *idx);
if (IS_ERR(base))
return NULL;
base = (const struct displayid_header *)&displayid[*idx];
*length = *idx + sizeof(*base) + base->bytes;
return displayid;
@@ -109,6 +123,9 @@ __displayid_iter_next(struct displayid_iter *iter)
}
for (;;) {
/* The first section we encounter is the base section */
bool base_section = !iter->section;
iter->section = drm_find_displayid_extension(iter->drm_edid,
&iter->length,
&iter->idx,
@@ -118,6 +135,18 @@ __displayid_iter_next(struct displayid_iter *iter)
return NULL;
}
/* Save the structure version and primary use case. */
if (base_section) {
const struct displayid_header *base;
base = displayid_get_header(iter->section, iter->length,
iter->idx);
if (!IS_ERR(base)) {
iter->version = base->rev;
iter->primary_use = base->prod_id;
}
}
iter->idx += sizeof(struct displayid_header);
block = displayid_iter_block(iter);
@@ -130,3 +159,18 @@ void displayid_iter_end(struct displayid_iter *iter)
{
memset(iter, 0, sizeof(*iter));
}
/* DisplayID Structure Version/Revision from the Base Section. */
u8 displayid_version(const struct displayid_iter *iter)
{
return iter->version;
}
/*
* DisplayID Primary Use Case (2.0+) or Product Type Identifier (1.0-1.3) from
* the Base Section.
*/
u8 displayid_primary_use(const struct displayid_iter *iter)
{
return iter->primary_use;
}


@@ -139,10 +139,7 @@ int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
if (!dev->driver->dumb_create)
return -ENOSYS;
if (dev->driver->dumb_destroy)
return dev->driver->dumb_destroy(file_priv, dev, handle);
else
return drm_gem_dumb_destroy(file_priv, dev, handle);
return drm_gem_handle_delete(file_priv, handle);
}
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,


@@ -3424,10 +3424,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
connector->base.id, connector->name);
return NULL;
}
if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Composite sync not supported\n",
connector->base.id, connector->name);
}
/* it is incorrect if hsync/vsync width is zero */
if (!hsync_pulse_width || !vsync_pulse_width) {
@@ -3474,10 +3470,27 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
} else {
mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
switch (pt->misc & DRM_EDID_PT_SYNC_MASK) {
case DRM_EDID_PT_ANALOG_CSYNC:
case DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC:
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Analog composite sync!\n",
connector->base.id, connector->name);
mode->flags |= DRM_MODE_FLAG_CSYNC | DRM_MODE_FLAG_NCSYNC;
break;
case DRM_EDID_PT_DIGITAL_CSYNC:
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Digital composite sync!\n",
connector->base.id, connector->name);
mode->flags |= DRM_MODE_FLAG_CSYNC;
mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
DRM_MODE_FLAG_PCSYNC : DRM_MODE_FLAG_NCSYNC;
break;
case DRM_EDID_PT_DIGITAL_SEPARATE_SYNC:
mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
break;
}
}
set_size:
@@ -6433,6 +6446,29 @@ static void drm_reset_display_info(struct drm_connector *connector)
info->quirks = 0;
}
static void update_displayid_info(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
struct drm_display_info *info = &connector->display_info;
const struct displayid_block *block;
struct displayid_iter iter;
displayid_iter_edid_begin(drm_edid, &iter);
displayid_iter_for_each(block, &iter) {
if (displayid_version(&iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
(displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_VR ||
displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_AR))
info->non_desktop = true;
/*
* We're only interested in the base section here, no need to
* iterate further.
*/
break;
}
displayid_iter_end(&iter);
}
static void update_display_info(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
@@ -6463,6 +6499,8 @@ static void update_display_info(struct drm_connector *connector,
info->color_formats |= DRM_COLOR_FORMAT_RGB444;
drm_parse_cea_ext(connector, drm_edid);
update_displayid_info(connector, drm_edid);
/*
* Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3?
*
@@ -7242,6 +7280,15 @@ static void drm_parse_tiled_block(struct drm_connector *connector,
}
}
static bool displayid_is_tiled_block(const struct displayid_iter *iter,
const struct displayid_block *block)
{
return (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_12 &&
block->tag == DATA_BLOCK_TILED_DISPLAY) ||
(displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
block->tag == DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY);
}
static void _drm_update_tile_info(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
@@ -7252,7 +7299,7 @@ static void _drm_update_tile_info(struct drm_connector *connector,
displayid_iter_edid_begin(drm_edid, &iter);
displayid_iter_for_each(block, &iter) {
if (block->tag == DATA_BLOCK_TILED_DISPLAY)
if (displayid_is_tiled_block(&iter, block))
drm_parse_tiled_block(connector, block);
}
displayid_iter_end(&iter);


@@ -336,13 +336,6 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
int drm_gem_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
u32 handle)
{
return drm_gem_handle_delete(file, handle);
}
/**
* drm_gem_handle_create_tail - internal functions to create a handle
* @file_priv: drm file-private structure to register the handle for
@@ -1466,3 +1459,21 @@ drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);
/**
* drm_gem_evict - helper to evict backing pages for a GEM object
* @obj: obj in question
*/
int drm_gem_evict(struct drm_gem_object *obj)
{
dma_resv_assert_held(obj->resv);
if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
return -EBUSY;
if (obj->funcs->evict)
return obj->funcs->evict(obj);
return 0;
}
EXPORT_SYMBOL(drm_gem_evict);
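/*
 * Illustrative sketch, not part of this patch: a driver-side evict
 * callback as drm_gem_evict() would invoke it. my_gem_object,
 * to_my_gem_object() and my_gem_purge_pages() are hypothetical; only
 * the obj->funcs->evict hook itself is introduced here.
 */
static int my_gem_evict(struct drm_gem_object *obj)
{
	struct my_gem_object *bo = to_my_gem_object(obj);

	/* The core calls this with the reservation lock held and the object idle. */
	my_gem_purge_pages(bo);

	return 0;
}

static const struct drm_gem_object_funcs my_gem_funcs = {
	.evict = my_gem_evict,
	/* .free, .vmap, .mmap, ... */
};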


@@ -141,7 +141,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
WARN_ON(shmem->vmap_use_count);
drm_WARN_ON(obj->dev, shmem->vmap_use_count);
if (obj->import_attach) {
drm_prime_gem_destroy(obj, shmem->sgt);
@@ -156,7 +156,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
drm_gem_shmem_put_pages(shmem);
}
WARN_ON(shmem->pages_use_count);
drm_WARN_ON(obj->dev, shmem->pages_use_count);
drm_gem_object_release(obj);
mutex_destroy(&shmem->pages_lock);
@@ -175,7 +175,8 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
PTR_ERR(pages));
shmem->pages_use_count = 0;
return PTR_ERR(pages);
}
@@ -207,9 +208,10 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
*/
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
int ret;
WARN_ON(shmem->base.import_attach);
drm_WARN_ON(obj->dev, obj->import_attach);
ret = mutex_lock_interruptible(&shmem->pages_lock);
if (ret)
@@ -225,7 +227,7 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
if (WARN_ON_ONCE(!shmem->pages_use_count))
if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
return;
if (--shmem->pages_use_count > 0)
@@ -268,7 +270,9 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
*/
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
WARN_ON(shmem->base.import_attach);
struct drm_gem_object *obj = &shmem->base;
drm_WARN_ON(obj->dev, obj->import_attach);
return drm_gem_shmem_get_pages(shmem);
}
@@ -283,7 +287,9 @@ EXPORT_SYMBOL(drm_gem_shmem_pin);
*/
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
WARN_ON(shmem->base.import_attach);
struct drm_gem_object *obj = &shmem->base;
drm_WARN_ON(obj->dev, obj->import_attach);
drm_gem_shmem_put_pages(shmem);
}
@@ -295,24 +301,22 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
if (shmem->vmap_use_count++ > 0) {
iosys_map_set_vaddr(map, shmem->vaddr);
return 0;
}
if (obj->import_attach) {
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
if (!ret) {
if (WARN_ON(map->is_iomem)) {
if (drm_WARN_ON(obj->dev, map->is_iomem)) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
ret = -EIO;
goto err_put_pages;
return -EIO;
}
shmem->vaddr = map->vaddr;
}
} else {
pgprot_t prot = PAGE_KERNEL;
if (shmem->vmap_use_count++ > 0) {
iosys_map_set_vaddr(map, shmem->vaddr);
return 0;
}
ret = drm_gem_shmem_get_pages(shmem);
if (ret)
goto err_zero_use;
@@ -328,7 +332,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
}
if (ret) {
DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
goto err_put_pages;
}
@@ -378,15 +382,15 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
{
struct drm_gem_object *obj = &shmem->base;
if (WARN_ON_ONCE(!shmem->vmap_use_count))
return;
if (--shmem->vmap_use_count > 0)
return;
if (obj->import_attach) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
} else {
if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
return;
if (--shmem->vmap_use_count > 0)
return;
vunmap(shmem->vaddr);
drm_gem_shmem_put_pages(shmem);
}
@@ -461,7 +465,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev;
WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
sg_free_table(shmem->sgt);
@@ -550,7 +554,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
mutex_lock(&shmem->pages_lock);
if (page_offset >= num_pages ||
WARN_ON_ONCE(!shmem->pages) ||
drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
shmem->madv < 0) {
ret = VM_FAULT_SIGBUS;
} else {
@@ -569,7 +573,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
WARN_ON(shmem->base.import_attach);
drm_WARN_ON(obj->dev, obj->import_attach);
mutex_lock(&shmem->pages_lock);
@@ -578,7 +582,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
* mmap'd, vm_open() just grabs an additional reference for the new
* mm the vma is getting copied into (ie. on fork()).
*/
if (!WARN_ON_ONCE(!shmem->pages_use_count))
if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
shmem->pages_use_count++;
mutex_unlock(&shmem->pages_lock);
@@ -648,6 +652,9 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
struct drm_printer *p, unsigned int indent)
{
if (shmem->base.import_attach)
return;
drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
@@ -672,7 +679,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
WARN_ON(shmem->base.import_attach);
drm_WARN_ON(obj->dev, obj->import_attach);
return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
@@ -687,7 +694,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
if (shmem->sgt)
return shmem->sgt;
WARN_ON(obj->import_attach);
drm_WARN_ON(obj->dev, obj->import_attach);
ret = drm_gem_shmem_get_pages_locked(shmem);
if (ret)


@@ -916,6 +916,17 @@ static int bo_driver_move(struct ttm_buffer_object *bo,
{
struct drm_gem_vram_object *gbo;
if (!bo->resource) {
if (new_mem->mem_type != TTM_PL_SYSTEM) {
hop->mem_type = TTM_PL_SYSTEM;
hop->flags = TTM_PL_FLAG_TEMPORARY;
return -EMULTIHOP;
}
ttm_bo_move_null(bo, new_mem);
return 0;
}
gbo = drm_gem_vram_of_bo(bo);
return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);


@@ -178,9 +178,6 @@ void drm_gem_unpin(struct drm_gem_object *obj);
int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
u32 handle);
/* drm_debugfs.c drm_debugfs_crc.c */
#if defined(CONFIG_DEBUG_FS)
int drm_debugfs_init(struct drm_minor *minor, int minor_id,


@@ -2339,8 +2339,7 @@ static int drm_mode_parse_cmdline_named_mode(const char *name,
* @mode: preallocated drm_cmdline_mode structure to fill out
*
* This parses @mode_option command line modeline for modes and options to
* configure the connector. If @mode_option is NULL the default command line
* modeline in fb_mode_option will be parsed instead.
* configure the connector.
*
* This uses the same parameters as the fb modedb.c, except for an extra
* force-enable, force-enable-digital and force-disable bit at the end::
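*
* For reference, an illustrative option (not taken from this patch) would
* be video=HDMI-A-1:1920x1080@60e, where the trailing "e" is the
* force-enable bit, "D" force-enables a digital output and "d"
* force-disables the connector.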


@@ -10,6 +10,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -493,3 +494,53 @@ int drm_of_get_data_lanes_count_ep(const struct device_node *port,
return ret;
}
EXPORT_SYMBOL_GPL(drm_of_get_data_lanes_count_ep);
#if IS_ENABLED(CONFIG_DRM_MIPI_DSI)
/**
* drm_of_get_dsi_bus - find the DSI bus for a given device
* @dev: parent device of display (SPI, I2C)
*
* Gets parent DSI bus for a DSI device controlled through a bus other
* than MIPI-DCS (SPI, I2C, etc.) using the Device Tree.
*
* Returns pointer to mipi_dsi_host if successful, -EINVAL if the
* request is unsupported, -EPROBE_DEFER if the DSI host is found but
* not available, or -ENODEV otherwise.
*/
struct mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev)
{
struct mipi_dsi_host *dsi_host;
struct device_node *endpoint, *dsi_host_node;
/*
* Get first endpoint child from device.
*/
endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
if (!endpoint)
return ERR_PTR(-ENODEV);
/*
* Follow the first endpoint to get the DSI host node and then
* release the endpoint since we no longer need it.
*/
dsi_host_node = of_graph_get_remote_port_parent(endpoint);
of_node_put(endpoint);
if (!dsi_host_node)
return ERR_PTR(-ENODEV);
/*
* Get the DSI host from the DSI host node. If we get an error
* or the return is null assume we're not ready to probe just
* yet. Release the DSI host node since we're done with it.
*/
dsi_host = of_find_mipi_dsi_host_by_node(dsi_host_node);
of_node_put(dsi_host_node);
if (IS_ERR_OR_NULL(dsi_host))
return ERR_PTR(-EPROBE_DEFER);
return dsi_host;
}
EXPORT_SYMBOL_GPL(drm_of_get_dsi_bus);
#endif /* CONFIG_DRM_MIPI_DSI */
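/*
 * Illustrative usage sketch, not part of this patch: a panel probed on
 * SPI that streams video over DSI. my_panel_probe() and the
 * mipi_dsi_device_info values are hypothetical; drm_of_get_dsi_bus()
 * and mipi_dsi_device_register_full() are the real interfaces.
 */
static int my_panel_probe(struct spi_device *spi)
{
	struct mipi_dsi_device_info info = { "my-panel", 0, spi->dev.of_node };
	struct mipi_dsi_host *host;
	struct mipi_dsi_device *dsi_dev;

	/* May return -EPROBE_DEFER if the DSI host is not bound yet. */
	host = drm_of_get_dsi_bus(&spi->dev);
	if (IS_ERR(host))
		return PTR_ERR(host);

	/* Register the panel as a device on the DSI bus we just found. */
	dsi_dev = mipi_dsi_device_register_full(host, &info);
	if (IS_ERR(dsi_dev))
		return PTR_ERR(dsi_dev);

	return 0;
}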


@@ -590,8 +590,9 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
*/
dev->mode_config.delayed_event = true;
if (dev->mode_config.poll_enabled)
schedule_delayed_work(&dev->mode_config.output_poll_work,
0);
mod_delayed_work(system_wq,
&dev->mode_config.output_poll_work,
0);
}
/* Re-enable polling in case the global poll config changed. */


@@ -0,0 +1,457 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2011 Red Hat Inc.
* Copyright 2023 Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/* Algorithm:
*
* We store the last allocated bo in "hole"; we always try to allocate
* after the last allocated bo. The principle is that in a linear GPU ring
* progression, what is after "last" is the oldest bo we allocated and thus
* the first one that should no longer be in use by the GPU.
*
* If that is not the case, we skip over the bo after "last" to the closest
* done bo, if one exists. If none exists and we are not asked to block,
* we report failure to allocate.
*
* If we are asked to block, we collect the oldest fence of each ring and
* wait for any one of those fences to complete.
*/
#include <drm/drm_suballoc.h>
#include <drm/drm_print.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/dma-fence.h>
static void drm_suballoc_remove_locked(struct drm_suballoc *sa);
static void drm_suballoc_try_free(struct drm_suballoc_manager *sa_manager);
/**
* drm_suballoc_manager_init() - Initialise the drm_suballoc_manager
* @sa_manager: pointer to the sa_manager
* @size: number of bytes we want to suballocate
* @align: alignment for each suballocated chunk
*
* Prepares the suballocation manager for suballocations.
*/
void drm_suballoc_manager_init(struct drm_suballoc_manager *sa_manager,
size_t size, size_t align)
{
unsigned int i;
BUILD_BUG_ON(!is_power_of_2(DRM_SUBALLOC_MAX_QUEUES));
if (!align)
align = 1;
/* alignment must be a power of 2 */
if (WARN_ON_ONCE(align & (align - 1)))
align = roundup_pow_of_two(align);
init_waitqueue_head(&sa_manager->wq);
sa_manager->size = size;
sa_manager->align = align;
sa_manager->hole = &sa_manager->olist;
INIT_LIST_HEAD(&sa_manager->olist);
for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
INIT_LIST_HEAD(&sa_manager->flist[i]);
}
EXPORT_SYMBOL(drm_suballoc_manager_init);
/**
* drm_suballoc_manager_fini() - Destroy the drm_suballoc_manager
* @sa_manager: pointer to the sa_manager
*
* Cleans up the suballocation manager after use. All fences added
* with drm_suballoc_free() must be signaled, or we cannot clean up
* the entire manager.
*/
void drm_suballoc_manager_fini(struct drm_suballoc_manager *sa_manager)
{
struct drm_suballoc *sa, *tmp;
if (!sa_manager->size)
return;
if (!list_empty(&sa_manager->olist)) {
sa_manager->hole = &sa_manager->olist;
drm_suballoc_try_free(sa_manager);
if (!list_empty(&sa_manager->olist))
DRM_ERROR("sa_manager is not empty, clearing anyway\n");
}
list_for_each_entry_safe(sa, tmp, &sa_manager->olist, olist) {
drm_suballoc_remove_locked(sa);
}
sa_manager->size = 0;
}
EXPORT_SYMBOL(drm_suballoc_manager_fini);
static void drm_suballoc_remove_locked(struct drm_suballoc *sa)
{
struct drm_suballoc_manager *sa_manager = sa->manager;
if (sa_manager->hole == &sa->olist)
sa_manager->hole = sa->olist.prev;
list_del_init(&sa->olist);
list_del_init(&sa->flist);
dma_fence_put(sa->fence);
kfree(sa);
}
static void drm_suballoc_try_free(struct drm_suballoc_manager *sa_manager)
{
struct drm_suballoc *sa, *tmp;
if (sa_manager->hole->next == &sa_manager->olist)
return;
sa = list_entry(sa_manager->hole->next, struct drm_suballoc, olist);
list_for_each_entry_safe_from(sa, tmp, &sa_manager->olist, olist) {
if (!sa->fence || !dma_fence_is_signaled(sa->fence))
return;
drm_suballoc_remove_locked(sa);
}
}
static size_t drm_suballoc_hole_soffset(struct drm_suballoc_manager *sa_manager)
{
struct list_head *hole = sa_manager->hole;
if (hole != &sa_manager->olist)
return list_entry(hole, struct drm_suballoc, olist)->eoffset;
return 0;
}
static size_t drm_suballoc_hole_eoffset(struct drm_suballoc_manager *sa_manager)
{
struct list_head *hole = sa_manager->hole;
if (hole->next != &sa_manager->olist)
return list_entry(hole->next, struct drm_suballoc, olist)->soffset;
return sa_manager->size;
}
static bool drm_suballoc_try_alloc(struct drm_suballoc_manager *sa_manager,
struct drm_suballoc *sa,
size_t size, size_t align)
{
size_t soffset, eoffset, wasted;
soffset = drm_suballoc_hole_soffset(sa_manager);
eoffset = drm_suballoc_hole_eoffset(sa_manager);
wasted = round_up(soffset, align) - soffset;
if ((eoffset - soffset) >= (size + wasted)) {
soffset += wasted;
sa->manager = sa_manager;
sa->soffset = soffset;
sa->eoffset = soffset + size;
list_add(&sa->olist, sa_manager->hole);
INIT_LIST_HEAD(&sa->flist);
sa_manager->hole = &sa->olist;
return true;
}
return false;
}
static bool __drm_suballoc_event(struct drm_suballoc_manager *sa_manager,
size_t size, size_t align)
{
size_t soffset, eoffset, wasted;
unsigned int i;
for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
if (!list_empty(&sa_manager->flist[i]))
return true;
soffset = drm_suballoc_hole_soffset(sa_manager);
eoffset = drm_suballoc_hole_eoffset(sa_manager);
wasted = round_up(soffset, align) - soffset;
return ((eoffset - soffset) >= (size + wasted));
}
/**
* drm_suballoc_event() - Check if we can stop waiting
* @sa_manager: pointer to the sa_manager
* @size: number of bytes we want to allocate
* @align: alignment we need to match
*
* Return: true if either there is a fence we can wait for or
* enough free memory to satisfy the allocation directly.
* false otherwise.
*/
static bool drm_suballoc_event(struct drm_suballoc_manager *sa_manager,
size_t size, size_t align)
{
bool ret;
spin_lock(&sa_manager->wq.lock);
ret = __drm_suballoc_event(sa_manager, size, align);
spin_unlock(&sa_manager->wq.lock);
return ret;
}
static bool drm_suballoc_next_hole(struct drm_suballoc_manager *sa_manager,
struct dma_fence **fences,
unsigned int *tries)
{
struct drm_suballoc *best_bo = NULL;
unsigned int i, best_idx;
size_t soffset, best, tmp;
/* if hole points to the end of the buffer */
if (sa_manager->hole->next == &sa_manager->olist) {
/* try again with its beginning */
sa_manager->hole = &sa_manager->olist;
return true;
}
soffset = drm_suballoc_hole_soffset(sa_manager);
/* to handle wrap around we add sa_manager->size */
best = sa_manager->size * 2;
/* go over all fence lists and try to find the sa closest to
* the current last one
*/
for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i) {
struct drm_suballoc *sa;
fences[i] = NULL;
if (list_empty(&sa_manager->flist[i]))
continue;
sa = list_first_entry(&sa_manager->flist[i],
struct drm_suballoc, flist);
if (!dma_fence_is_signaled(sa->fence)) {
fences[i] = sa->fence;
continue;
}
/* limit the number of tries each freelist gets */
if (tries[i] > 2)
continue;
tmp = sa->soffset;
if (tmp < soffset) {
/* wrap around, pretend it's after */
tmp += sa_manager->size;
}
tmp -= soffset;
if (tmp < best) {
/* this sa bo is the closest one */
best = tmp;
best_idx = i;
best_bo = sa;
}
}
if (best_bo) {
++tries[best_idx];
sa_manager->hole = best_bo->olist.prev;
/*
* We know that this one is signaled,
* so it's safe to remove it.
*/
drm_suballoc_remove_locked(best_bo);
return true;
}
return false;
}
/**
* drm_suballoc_new() - Make a suballocation.
* @sa_manager: pointer to the sa_manager
* @size: number of bytes we want to suballocate.
* @gfp: gfp flags used for memory allocation. Typically GFP_KERNEL but
* the argument is provided for suballocations from reclaim context or
* where the caller wants to avoid pipelining rather than wait for
* reclaim.
* @intr: Whether to perform waits interruptibly. This should typically
* always be true, unless the caller needs to propagate a
* non-interruptible context from above layers.
* @align: Alignment. Must not exceed the default manager alignment.
* If @align is zero, then the manager alignment is used.
*
* Try to make a suballocation of size @size, which will be rounded
* up to the alignment specified in drm_suballoc_manager_init().
*
* Return: a new suballocated bo, or an ERR_PTR.
*/
struct drm_suballoc *
drm_suballoc_new(struct drm_suballoc_manager *sa_manager, size_t size,
gfp_t gfp, bool intr, size_t align)
{
struct dma_fence *fences[DRM_SUBALLOC_MAX_QUEUES];
unsigned int tries[DRM_SUBALLOC_MAX_QUEUES];
unsigned int count;
int i, r;
struct drm_suballoc *sa;
if (WARN_ON_ONCE(align > sa_manager->align))
return ERR_PTR(-EINVAL);
if (WARN_ON_ONCE(size > sa_manager->size || !size))
return ERR_PTR(-EINVAL);
if (!align)
align = sa_manager->align;
sa = kmalloc(sizeof(*sa), gfp);
if (!sa)
return ERR_PTR(-ENOMEM);
sa->manager = sa_manager;
sa->fence = NULL;
INIT_LIST_HEAD(&sa->olist);
INIT_LIST_HEAD(&sa->flist);
spin_lock(&sa_manager->wq.lock);
do {
for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
tries[i] = 0;
do {
drm_suballoc_try_free(sa_manager);
if (drm_suballoc_try_alloc(sa_manager, sa,
size, align)) {
spin_unlock(&sa_manager->wq.lock);
return sa;
}
/* see if we can skip over some allocations */
} while (drm_suballoc_next_hole(sa_manager, fences, tries));
for (i = 0, count = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
if (fences[i])
fences[count++] = dma_fence_get(fences[i]);
if (count) {
long t;
spin_unlock(&sa_manager->wq.lock);
t = dma_fence_wait_any_timeout(fences, count, intr,
MAX_SCHEDULE_TIMEOUT,
NULL);
for (i = 0; i < count; ++i)
dma_fence_put(fences[i]);
r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock);
} else if (intr) {
/* if we have nothing to wait for, block */
r = wait_event_interruptible_locked
(sa_manager->wq,
__drm_suballoc_event(sa_manager, size, align));
} else {
spin_unlock(&sa_manager->wq.lock);
wait_event(sa_manager->wq,
drm_suballoc_event(sa_manager, size, align));
r = 0;
spin_lock(&sa_manager->wq.lock);
}
} while (!r);
spin_unlock(&sa_manager->wq.lock);
kfree(sa);
return ERR_PTR(r);
}
EXPORT_SYMBOL(drm_suballoc_new);
/**
* drm_suballoc_free - Free a suballocation
* @suballoc: pointer to the suballocation
* @fence: fence that signals when suballocation is idle
*
* Free the suballocation. The suballocation can be re-used after @fence signals.
*/
void drm_suballoc_free(struct drm_suballoc *suballoc,
struct dma_fence *fence)
{
struct drm_suballoc_manager *sa_manager;
if (!suballoc)
return;
sa_manager = suballoc->manager;
spin_lock(&sa_manager->wq.lock);
if (fence && !dma_fence_is_signaled(fence)) {
u32 idx;
suballoc->fence = dma_fence_get(fence);
idx = fence->context & (DRM_SUBALLOC_MAX_QUEUES - 1);
list_add_tail(&suballoc->flist, &sa_manager->flist[idx]);
} else {
drm_suballoc_remove_locked(suballoc);
}
wake_up_all_locked(&sa_manager->wq);
spin_unlock(&sa_manager->wq.lock);
}
EXPORT_SYMBOL(drm_suballoc_free);
#ifdef CONFIG_DEBUG_FS
void drm_suballoc_dump_debug_info(struct drm_suballoc_manager *sa_manager,
struct drm_printer *p,
unsigned long long suballoc_base)
{
struct drm_suballoc *i;
spin_lock(&sa_manager->wq.lock);
list_for_each_entry(i, &sa_manager->olist, olist) {
unsigned long long soffset = i->soffset;
unsigned long long eoffset = i->eoffset;
if (&i->olist == sa_manager->hole)
drm_puts(p, ">");
else
drm_puts(p, " ");
drm_printf(p, "[0x%010llx 0x%010llx] size %8lld",
suballoc_base + soffset, suballoc_base + eoffset,
eoffset - soffset);
if (i->fence)
drm_printf(p, " protected by 0x%016llx on context %llu",
(unsigned long long)i->fence->seqno,
(unsigned long long)i->fence->context);
drm_puts(p, "\n");
}
spin_unlock(&sa_manager->wq.lock);
}
EXPORT_SYMBOL(drm_suballoc_dump_debug_info);
#endif
MODULE_AUTHOR("Multiple");
MODULE_DESCRIPTION("Range suballocator helper");
MODULE_LICENSE("Dual MIT/GPL");
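/*
 * Illustrative usage sketch, not part of this patch, using only the API
 * added above; the 256 KiB buffer, the my_ring_*() functions and the
 * SZ_* constants (<linux/sizes.h>) are assumptions for the example.
 */
static struct drm_suballoc_manager mgr;

static void my_ring_init(void)
{
	/* Manage a 256 KiB buffer with a 16-byte default alignment. */
	drm_suballoc_manager_init(&mgr, SZ_256K, 16);
}

static int my_ring_emit(struct dma_fence *done_fence)
{
	struct drm_suballoc *sa;

	/* Carve out 4 KiB, waiting interruptibly if nothing is free yet. */
	sa = drm_suballoc_new(&mgr, SZ_4K, GFP_KERNEL, true, 0);
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	/* ... emit commands into the suballocated range ... */

	/* The range becomes reusable once done_fence signals. */
	drm_suballoc_free(sa, done_fence);
	return 0;
}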


@@ -472,7 +472,7 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
struct ttm_placement place = {};
int ret;
if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
if (!bo->ttm || i915_ttm_cpu_maps_iomem(bo->resource))
return 0;
GEM_BUG_ON(!i915_tt->is_shmem);
@@ -511,7 +511,13 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
if (bo->resource && !i915_ttm_is_ghost_object(bo)) {
/*
* This gets called twice by ttm, so long as we have a ttm resource or
* ttm_tt then we can still safely call this. Due to pipeline-gutting,
* we may have a NULL bo->resource, but in that case we should always
* have a ttm alive (like if the pages are swapped out).
*/
if ((bo->resource || bo->ttm) && !i915_ttm_is_ghost_object(bo)) {
__i915_gem_object_pages_fini(obj);
i915_ttm_free_cached_io_rsgt(obj);
}
@@ -1067,11 +1073,12 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
.interruptible = true,
.no_wait_gpu = true, /* should be idle already */
};
int err;
GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
ret = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
if (ret) {
err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
if (err) {
dma_resv_unlock(bo->base.resv);
return VM_FAULT_SIGBUS;
}


@@ -98,7 +98,7 @@ static inline bool i915_ttm_gtt_binds_lmem(struct ttm_resource *mem)
static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
{
/* Once / if we support GGTT, this is also false for cached ttm_tts */
return mem->mem_type != I915_PL_SYSTEM;
return mem && mem->mem_type != I915_PL_SYSTEM;
}
bool i915_ttm_resource_mappable(struct ttm_resource *res);


@@ -711,6 +711,10 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
assert_object_held(dst);
assert_object_held(src);
if (GEM_WARN_ON(!src_bo->resource || !dst_bo->resource))
return -EINVAL;
i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
ret = dma_resv_reserve_fences(src_bo->base.resv, 1);


@@ -53,7 +53,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
unsigned int flags;
int err = 0;
if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
if (!i915_ttm_cpu_maps_iomem(bo->resource) || obj->ttm.backup)
return 0;
if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
@@ -187,7 +187,10 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
return err;
/* Content may have been swapped. */
err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
if (!backup_bo->resource)
err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
if (!err)
err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
if (!err) {
err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
false);


@@ -866,10 +866,10 @@ meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode)
DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))
return MODE_BAD;
if (mode->hdisplay < 640 || mode->hdisplay > 1920)
if (mode->hdisplay < 400 || mode->hdisplay > 1920)
return MODE_BAD_HVALUE;
if (mode->vdisplay < 480 || mode->vdisplay > 1200)
if (mode->vdisplay < 480 || mode->vdisplay > 1920)
return MODE_BAD_VVALUE;
return MODE_OK;


@@ -375,12 +375,15 @@ int mgag200_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *new_state);
void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *old_state);
void mgag200_primary_plane_helper_atomic_enable(struct drm_plane *plane,
struct drm_atomic_state *state);
void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *old_state);
#define MGAG200_PRIMARY_PLANE_HELPER_FUNCS \
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
.atomic_check = mgag200_primary_plane_helper_atomic_check, \
.atomic_update = mgag200_primary_plane_helper_atomic_update, \
.atomic_enable = mgag200_primary_plane_helper_atomic_enable, \
.atomic_disable = mgag200_primary_plane_helper_atomic_disable
#define MGAG200_PRIMARY_PLANE_FUNCS \


@@ -501,10 +501,6 @@ void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_framebuffer *fb = plane_state->fb;
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
u8 seq1;
if (!fb)
return;
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
@@ -514,13 +510,19 @@ void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane,
/* Always scanout image at VRAM offset 0 */
mgag200_set_startadd(mdev, (u32)0);
mgag200_set_offset(mdev, fb);
}
if (!old_plane_state->crtc && plane_state->crtc) { // enabling
RREG_SEQ(0x01, seq1);
seq1 &= ~MGAREG_SEQ1_SCROFF;
WREG_SEQ(0x01, seq1);
msleep(20);
}
void mgag200_primary_plane_helper_atomic_enable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_device *dev = plane->dev;
struct mga_device *mdev = to_mga_device(dev);
u8 seq1;
RREG_SEQ(0x01, seq1);
seq1 &= ~MGAREG_SEQ1_SCROFF;
WREG_SEQ(0x01, seq1);
msleep(20);
}
void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,


@@ -1015,9 +1015,6 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
if (ret)
goto out_ntfy;
if (nvbo->bo.pin_count)
NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
if (ret)


@@ -41,7 +41,7 @@ static ssize_t
nouveau_hwmon_show_temp1_auto_point1_pwm(struct device *d,
struct device_attribute *a, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", 100);
return sysfs_emit(buf, "%d\n", 100);
}
static SENSOR_DEVICE_ATTR(temp1_auto_point1_pwm, 0444,
nouveau_hwmon_show_temp1_auto_point1_pwm, NULL, 0);
@@ -54,8 +54,8 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000);
return sysfs_emit(buf, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000);
}
static ssize_t
nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
@@ -87,8 +87,8 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
return sysfs_emit(buf, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
}
static ssize_t
nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,


@@ -27,7 +27,7 @@
#include "nouveau_drv.h"
struct led_classdev;
#include <linux/leds.h>
struct nouveau_led {
struct drm_device *dev;


@@ -318,6 +318,17 @@ config DRM_PANEL_LG_LG4573
Say Y here if you want to enable support for LG4573 RGB panel.
To compile this driver as a module, choose M here.
config DRM_PANEL_MAGNACHIP_D53E6EA8966
tristate "Magnachip D53E6EA8966 DSI panel"
depends on OF && SPI
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
select DRM_MIPI_DBI
help
DRM panel driver for the Samsung AMS495QA01 panel controlled
with the Magnachip D53E6EA8966 panel IC. This panel receives
video data via DSI but commands via 9-bit SPI using DBI.
config DRM_PANEL_NEC_NL8048HL11
tristate "NEC NL8048HL11 RGB panel"
depends on GPIOLIB && OF && SPI

View File

@@ -29,6 +29,7 @@ obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_MAGNACHIP_D53E6EA8966) += panel-magnachip-d53e6ea8966.o
obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
obj-$(CONFIG_DRM_PANEL_NEWVISION_NV3051D) += panel-newvision-nv3051d.o
obj-$(CONFIG_DRM_PANEL_NEWVISION_NV3052C) += panel-newvision-nv3052c.o


@@ -167,6 +167,202 @@ static const struct drm_panel_funcs jadard_funcs = {
.get_modes = jadard_get_modes,
};
static const struct jadard_init_cmd radxa_display_8hd_ad002_init_cmds[] = {
{ .data = { 0xE0, 0x00 } },
{ .data = { 0xE1, 0x93 } },
{ .data = { 0xE2, 0x65 } },
{ .data = { 0xE3, 0xF8 } },
{ .data = { 0x80, 0x03 } },
{ .data = { 0xE0, 0x01 } },
{ .data = { 0x00, 0x00 } },
{ .data = { 0x01, 0x7E } },
{ .data = { 0x03, 0x00 } },
{ .data = { 0x04, 0x65 } },
{ .data = { 0x0C, 0x74 } },
{ .data = { 0x17, 0x00 } },
{ .data = { 0x18, 0xB7 } },
{ .data = { 0x19, 0x00 } },
{ .data = { 0x1A, 0x00 } },
{ .data = { 0x1B, 0xB7 } },
{ .data = { 0x1C, 0x00 } },
{ .data = { 0x24, 0xFE } },
{ .data = { 0x37, 0x19 } },
{ .data = { 0x38, 0x05 } },
{ .data = { 0x39, 0x00 } },
{ .data = { 0x3A, 0x01 } },
{ .data = { 0x3B, 0x01 } },
{ .data = { 0x3C, 0x70 } },
{ .data = { 0x3D, 0xFF } },
{ .data = { 0x3E, 0xFF } },
{ .data = { 0x3F, 0xFF } },
{ .data = { 0x40, 0x06 } },
{ .data = { 0x41, 0xA0 } },
{ .data = { 0x43, 0x1E } },
{ .data = { 0x44, 0x0F } },
{ .data = { 0x45, 0x28 } },
{ .data = { 0x4B, 0x04 } },
{ .data = { 0x55, 0x02 } },
{ .data = { 0x56, 0x01 } },
{ .data = { 0x57, 0xA9 } },
{ .data = { 0x58, 0x0A } },
{ .data = { 0x59, 0x0A } },
{ .data = { 0x5A, 0x37 } },
{ .data = { 0x5B, 0x19 } },
{ .data = { 0x5D, 0x78 } },
{ .data = { 0x5E, 0x63 } },
{ .data = { 0x5F, 0x54 } },
{ .data = { 0x60, 0x49 } },
{ .data = { 0x61, 0x45 } },
{ .data = { 0x62, 0x38 } },
{ .data = { 0x63, 0x3D } },
{ .data = { 0x64, 0x28 } },
{ .data = { 0x65, 0x43 } },
{ .data = { 0x66, 0x41 } },
{ .data = { 0x67, 0x43 } },
{ .data = { 0x68, 0x62 } },
{ .data = { 0x69, 0x50 } },
{ .data = { 0x6A, 0x57 } },
{ .data = { 0x6B, 0x49 } },
{ .data = { 0x6C, 0x44 } },
{ .data = { 0x6D, 0x37 } },
{ .data = { 0x6E, 0x23 } },
{ .data = { 0x6F, 0x10 } },
{ .data = { 0x70, 0x78 } },
{ .data = { 0x71, 0x63 } },
{ .data = { 0x72, 0x54 } },
{ .data = { 0x73, 0x49 } },
{ .data = { 0x74, 0x45 } },
{ .data = { 0x75, 0x38 } },
{ .data = { 0x76, 0x3D } },
{ .data = { 0x77, 0x28 } },
{ .data = { 0x78, 0x43 } },
{ .data = { 0x79, 0x41 } },
{ .data = { 0x7A, 0x43 } },
{ .data = { 0x7B, 0x62 } },
{ .data = { 0x7C, 0x50 } },
{ .data = { 0x7D, 0x57 } },
{ .data = { 0x7E, 0x49 } },
{ .data = { 0x7F, 0x44 } },
{ .data = { 0x80, 0x37 } },
{ .data = { 0x81, 0x23 } },
{ .data = { 0x82, 0x10 } },
{ .data = { 0xE0, 0x02 } },
{ .data = { 0x00, 0x47 } },
{ .data = { 0x01, 0x47 } },
{ .data = { 0x02, 0x45 } },
{ .data = { 0x03, 0x45 } },
{ .data = { 0x04, 0x4B } },
{ .data = { 0x05, 0x4B } },
{ .data = { 0x06, 0x49 } },
{ .data = { 0x07, 0x49 } },
{ .data = { 0x08, 0x41 } },
{ .data = { 0x09, 0x1F } },
{ .data = { 0x0A, 0x1F } },
{ .data = { 0x0B, 0x1F } },
{ .data = { 0x0C, 0x1F } },
{ .data = { 0x0D, 0x1F } },
{ .data = { 0x0E, 0x1F } },
{ .data = { 0x0F, 0x5F } },
{ .data = { 0x10, 0x5F } },
{ .data = { 0x11, 0x57 } },
{ .data = { 0x12, 0x77 } },
{ .data = { 0x13, 0x35 } },
{ .data = { 0x14, 0x1F } },
{ .data = { 0x15, 0x1F } },
{ .data = { 0x16, 0x46 } },
{ .data = { 0x17, 0x46 } },
{ .data = { 0x18, 0x44 } },
{ .data = { 0x19, 0x44 } },
{ .data = { 0x1A, 0x4A } },
{ .data = { 0x1B, 0x4A } },
{ .data = { 0x1C, 0x48 } },
{ .data = { 0x1D, 0x48 } },
{ .data = { 0x1E, 0x40 } },
{ .data = { 0x1F, 0x1F } },
{ .data = { 0x20, 0x1F } },
{ .data = { 0x21, 0x1F } },
{ .data = { 0x22, 0x1F } },
{ .data = { 0x23, 0x1F } },
{ .data = { 0x24, 0x1F } },
{ .data = { 0x25, 0x5F } },
{ .data = { 0x26, 0x5F } },
{ .data = { 0x27, 0x57 } },
{ .data = { 0x28, 0x77 } },
{ .data = { 0x29, 0x35 } },
{ .data = { 0x2A, 0x1F } },
{ .data = { 0x2B, 0x1F } },
{ .data = { 0x58, 0x40 } },
{ .data = { 0x59, 0x00 } },
{ .data = { 0x5A, 0x00 } },
{ .data = { 0x5B, 0x10 } },
{ .data = { 0x5C, 0x06 } },
{ .data = { 0x5D, 0x40 } },
{ .data = { 0x5E, 0x01 } },
{ .data = { 0x5F, 0x02 } },
{ .data = { 0x60, 0x30 } },
{ .data = { 0x61, 0x01 } },
{ .data = { 0x62, 0x02 } },
{ .data = { 0x63, 0x03 } },
{ .data = { 0x64, 0x6B } },
{ .data = { 0x65, 0x05 } },
{ .data = { 0x66, 0x0C } },
{ .data = { 0x67, 0x73 } },
{ .data = { 0x68, 0x09 } },
{ .data = { 0x69, 0x03 } },
{ .data = { 0x6A, 0x56 } },
{ .data = { 0x6B, 0x08 } },
{ .data = { 0x6C, 0x00 } },
{ .data = { 0x6D, 0x04 } },
{ .data = { 0x6E, 0x04 } },
{ .data = { 0x6F, 0x88 } },
{ .data = { 0x70, 0x00 } },
{ .data = { 0x71, 0x00 } },
{ .data = { 0x72, 0x06 } },
{ .data = { 0x73, 0x7B } },
{ .data = { 0x74, 0x00 } },
{ .data = { 0x75, 0xF8 } },
{ .data = { 0x76, 0x00 } },
{ .data = { 0x77, 0xD5 } },
{ .data = { 0x78, 0x2E } },
{ .data = { 0x79, 0x12 } },
{ .data = { 0x7A, 0x03 } },
{ .data = { 0x7B, 0x00 } },
{ .data = { 0x7C, 0x00 } },
{ .data = { 0x7D, 0x03 } },
{ .data = { 0x7E, 0x7B } },
{ .data = { 0xE0, 0x04 } },
{ .data = { 0x00, 0x0E } },
{ .data = { 0x02, 0xB3 } },
{ .data = { 0x09, 0x60 } },
{ .data = { 0x0E, 0x2A } },
{ .data = { 0x36, 0x59 } },
{ .data = { 0xE0, 0x00 } },
};
static const struct jadard_panel_desc radxa_display_8hd_ad002_desc = {
.mode = {
.clock = 70000,
.hdisplay = 800,
.hsync_start = 800 + 40,
.hsync_end = 800 + 40 + 18,
.htotal = 800 + 40 + 18 + 20,
.vdisplay = 1280,
.vsync_start = 1280 + 20,
.vsync_end = 1280 + 20 + 4,
.vtotal = 1280 + 20 + 4 + 20,
.width_mm = 127,
.height_mm = 199,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
},
.lanes = 4,
.format = MIPI_DSI_FMT_RGB888,
.init_cmds = radxa_display_8hd_ad002_init_cmds,
.num_init_cmds = ARRAY_SIZE(radxa_display_8hd_ad002_init_cmds),
};
static const struct jadard_init_cmd cz101b4001_init_cmds[] = {
{ .data = { 0xE0, 0x00 } },
{ .data = { 0xE1, 0x93 } },
@@ -452,7 +648,18 @@ static void jadard_dsi_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id jadard_of_match[] = {
{ .compatible = "chongzhou,cz101b4001", .data = &cz101b4001_desc },
{
.compatible = "chongzhou,cz101b4001",
.data = &cz101b4001_desc
},
{
.compatible = "radxa,display-10hd-ad001",
.data = &cz101b4001_desc
},
{
.compatible = "radxa,display-8hd-ad002",
.data = &radxa_display_8hd_ad002_desc
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, jadard_of_match);


@@ -0,0 +1,522 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Magnachip d53e6ea8966 MIPI-DSI panel driver
* Copyright (C) 2023 Chris Morgan
*/
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
/* Forward declaration for use in backlight function */
struct d53e6ea8966;
/* Panel info, unique to each panel */
struct d53e6ea8966_panel_info {
/** @display_modes: the supported display modes */
const struct drm_display_mode *display_modes;
/** @num_modes: the number of supported display modes */
unsigned int num_modes;
/** @width_mm: panel width in mm */
u16 width_mm;
/** @height_mm: panel height in mm */
u16 height_mm;
/** @bus_flags: drm bus flags for panel */
u32 bus_flags;
/** @panel_init_seq: panel specific init sequence */
void (*panel_init_seq)(struct d53e6ea8966 *db);
/** @backlight_register: panel backlight registration or NULL */
int (*backlight_register)(struct d53e6ea8966 *db);
};
struct d53e6ea8966 {
/** @dev: the container device */
struct device *dev;
/** @dbi: the DBI bus abstraction handle */
struct mipi_dbi dbi;
/** @panel: the DRM panel instance for this device */
struct drm_panel panel;
/** @reset: reset GPIO line */
struct gpio_desc *reset;
/** @enable: enable GPIO line */
struct gpio_desc *enable;
/** @reg_vdd: VDD supply regulator for panel logic */
struct regulator *reg_vdd;
/** @reg_elvdd: ELVDD supply regulator for panel display */
struct regulator *reg_elvdd;
/** @dsi_dev: DSI child device (panel) */
struct mipi_dsi_device *dsi_dev;
/** @bl_dev: pseudo-backlight device for oled panel */
struct backlight_device *bl_dev;
/** @panel_info: struct containing panel timing and info */
const struct d53e6ea8966_panel_info *panel_info;
};
#define NUM_GAMMA_LEVELS 16
#define GAMMA_TABLE_COUNT 23
#define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1)
#define MCS_ELVSS_ON 0xb1
#define MCS_TEMP_SWIRE 0xb2
#define MCS_PASSWORD_0 0xf0
#define MCS_PASSWORD_1 0xf1
#define MCS_ANALOG_PWR_CTL_0 0xf4
#define MCS_ANALOG_PWR_CTL_1 0xf5
#define MCS_GTCON_SET 0xf7
#define MCS_GATELESS_SIGNAL_SET 0xf8
#define MCS_SET_GAMMA 0xf9
static inline struct d53e6ea8966 *to_d53e6ea8966(struct drm_panel *panel)
{
return container_of(panel, struct d53e6ea8966, panel);
}
/* Table of gamma values provided in datasheet */
static u8 ams495qa01_gamma[NUM_GAMMA_LEVELS][GAMMA_TABLE_COUNT] = {
{0x01, 0x79, 0x78, 0x8d, 0xd9, 0xdf, 0xd5, 0xcb, 0xcf, 0xc5,
0xe5, 0xe0, 0xe4, 0xdc, 0xb8, 0xd4, 0xfa, 0xed, 0xe6, 0x2f,
0x00, 0x2f},
{0x01, 0x7d, 0x7c, 0x92, 0xd7, 0xdd, 0xd2, 0xcb, 0xd0, 0xc6,
0xe5, 0xe1, 0xe3, 0xda, 0xbd, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
0x00, 0x2f},
{0x01, 0x7f, 0x7e, 0x95, 0xd7, 0xde, 0xd2, 0xcb, 0xcf, 0xc5,
0xe5, 0xe3, 0xe3, 0xda, 0xbf, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
0x00, 0x2f},
{0x01, 0x82, 0x81, 0x99, 0xd6, 0xdd, 0xd1, 0xca, 0xcf, 0xc3,
0xe4, 0xe3, 0xe3, 0xda, 0xc2, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
0x00, 0x2f},
{0x01, 0x84, 0x83, 0x9b, 0xd7, 0xde, 0xd2, 0xc8, 0xce, 0xc2,
0xe4, 0xe3, 0xe2, 0xd9, 0xc3, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
0x00, 0x2f},
{0x01, 0x87, 0x86, 0x9f, 0xd6, 0xdd, 0xd1, 0xc7, 0xce, 0xc1,
0xe4, 0xe3, 0xe2, 0xd9, 0xc6, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
0x00, 0x2f},
{0x01, 0x89, 0x89, 0xa2, 0xd5, 0xdb, 0xcf, 0xc8, 0xcf, 0xc2,
0xe3, 0xe3, 0xe1, 0xd9, 0xc7, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
0x00, 0x2f},
{0x01, 0x8b, 0x8b, 0xa5, 0xd5, 0xdb, 0xcf, 0xc7, 0xce, 0xc0,
0xe3, 0xe3, 0xe1, 0xd8, 0xc7, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
0x00, 0x2f},
{0x01, 0x8d, 0x8d, 0xa7, 0xd5, 0xdb, 0xcf, 0xc6, 0xce, 0xc0,
0xe4, 0xe4, 0xe1, 0xd7, 0xc8, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
0x00, 0x2f},
{0x01, 0x8f, 0x8f, 0xaa, 0xd4, 0xdb, 0xce, 0xc6, 0xcd, 0xbf,
0xe3, 0xe3, 0xe1, 0xd7, 0xca, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
0x00, 0x2f},
{0x01, 0x91, 0x91, 0xac, 0xd3, 0xda, 0xce, 0xc5, 0xcd, 0xbe,
0xe3, 0xe3, 0xe0, 0xd7, 0xca, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
0x00, 0x2f},
{0x01, 0x93, 0x93, 0xaf, 0xd3, 0xda, 0xcd, 0xc5, 0xcd, 0xbe,
0xe2, 0xe3, 0xdf, 0xd6, 0xca, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
0x00, 0x2f},
{0x01, 0x95, 0x95, 0xb1, 0xd2, 0xd9, 0xcc, 0xc4, 0xcd, 0xbe,
0xe2, 0xe3, 0xdf, 0xd7, 0xcc, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
0x00, 0x2f},
{0x01, 0x99, 0x99, 0xb6, 0xd1, 0xd9, 0xcc, 0xc3, 0xcb, 0xbc,
0xe2, 0xe4, 0xdf, 0xd6, 0xcc, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
0x00, 0x2f},
{0x01, 0x9c, 0x9c, 0xba, 0xd0, 0xd8, 0xcb, 0xc3, 0xcb, 0xbb,
0xe2, 0xe4, 0xdf, 0xd6, 0xce, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
0x00, 0x2f},
{0x01, 0x9f, 0x9f, 0xbe, 0xcf, 0xd7, 0xc9, 0xc2, 0xcb, 0xbb,
0xe1, 0xe3, 0xde, 0xd6, 0xd0, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
0x00, 0x2f},
};
/*
* Table of ELVSS values provided in the datasheet; each entry corresponds
* to the gamma level with the same index.
*/
static u8 ams495qa01_elvss[NUM_GAMMA_LEVELS] = {
0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
0x15, 0x15, 0x14, 0x14, 0x13, 0x12,
};
static int ams495qa01_update_gamma(struct mipi_dbi *dbi, int brightness)
{
int tmp = brightness;
mipi_dbi_command_buf(dbi, MCS_SET_GAMMA, ams495qa01_gamma[tmp],
ARRAY_SIZE(ams495qa01_gamma[tmp]));
mipi_dbi_command(dbi, MCS_SET_GAMMA, 0x00);
/* Undocumented command */
mipi_dbi_command(dbi, 0x26, 0x00);
mipi_dbi_command(dbi, MCS_TEMP_SWIRE, ams495qa01_elvss[tmp]);
return 0;
}
static void ams495qa01_panel_init(struct d53e6ea8966 *db)
{
struct mipi_dbi *dbi = &db->dbi;
mipi_dbi_command(dbi, MCS_PASSWORD_0, 0x5a, 0x5a);
mipi_dbi_command(dbi, MCS_PASSWORD_1, 0x5a, 0x5a);
/* Undocumented commands */
mipi_dbi_command(dbi, 0xb0, 0x02);
mipi_dbi_command(dbi, 0xf3, 0x3b);
mipi_dbi_command(dbi, MCS_ANALOG_PWR_CTL_0, 0x33, 0x42, 0x00, 0x08);
mipi_dbi_command(dbi, MCS_ANALOG_PWR_CTL_1, 0x00, 0x06, 0x26, 0x35, 0x03);
/* Undocumented commands */
mipi_dbi_command(dbi, 0xf6, 0x02);
mipi_dbi_command(dbi, 0xc6, 0x0b, 0x00, 0x00, 0x3c, 0x00, 0x22,
0x00, 0x00, 0x00, 0x00);
mipi_dbi_command(dbi, MCS_GTCON_SET, 0x20);
mipi_dbi_command(dbi, MCS_TEMP_SWIRE, 0x06, 0x06, 0x06, 0x06);
mipi_dbi_command(dbi, MCS_ELVSS_ON, 0x07, 0x00, 0x10);
mipi_dbi_command(dbi, MCS_GATELESS_SIGNAL_SET, 0x7f, 0x7a,
0x89, 0x67, 0x26, 0x38, 0x00, 0x00, 0x09,
0x67, 0x70, 0x88, 0x7a, 0x76, 0x05, 0x09,
0x23, 0x23, 0x23);
/* Undocumented commands */
mipi_dbi_command(dbi, 0xb5, 0xff, 0xef, 0x35, 0x42, 0x0d, 0xd7,
0xff, 0x07, 0xff, 0xff, 0xfd, 0x00, 0x01,
0xff, 0x05, 0x12, 0x0f, 0xff, 0xff, 0xff,
0xff);
mipi_dbi_command(dbi, 0xb4, 0x15);
mipi_dbi_command(dbi, 0xb3, 0x00);
ams495qa01_update_gamma(dbi, MAX_BRIGHTNESS);
}
static int d53e6ea8966_prepare(struct drm_panel *panel)
{
struct d53e6ea8966 *db = to_d53e6ea8966(panel);
int ret;
/* Power up */
ret = regulator_enable(db->reg_vdd);
if (ret) {
dev_err(db->dev, "failed to enable vdd regulator: %d\n", ret);
return ret;
}
if (db->reg_elvdd) {
ret = regulator_enable(db->reg_elvdd);
if (ret) {
dev_err(db->dev,
"failed to enable elvdd regulator: %d\n", ret);
regulator_disable(db->reg_vdd);
return ret;
}
}
/* Enable */
if (db->enable)
gpiod_set_value_cansleep(db->enable, 1);
msleep(50);
/* Reset */
gpiod_set_value_cansleep(db->reset, 1);
usleep_range(1000, 5000);
gpiod_set_value_cansleep(db->reset, 0);
msleep(20);
db->panel_info->panel_init_seq(db);
return 0;
}
static int d53e6ea8966_enable(struct drm_panel *panel)
{
struct d53e6ea8966 *db = to_d53e6ea8966(panel);
struct mipi_dbi *dbi = &db->dbi;
mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(200);
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
usleep_range(10000, 15000);
return 0;
}
static int d53e6ea8966_disable(struct drm_panel *panel)
{
struct d53e6ea8966 *db = to_d53e6ea8966(panel);
struct mipi_dbi *dbi = &db->dbi;
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
msleep(20);
mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE);
msleep(100);
return 0;
}
static int d53e6ea8966_unprepare(struct drm_panel *panel)
{
struct d53e6ea8966 *db = to_d53e6ea8966(panel);
if (db->enable)
gpiod_set_value_cansleep(db->enable, 0);
gpiod_set_value_cansleep(db->reset, 1);
if (db->reg_elvdd)
regulator_disable(db->reg_elvdd);
regulator_disable(db->reg_vdd);
msleep(100);
return 0;
}
static int d53e6ea8966_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct d53e6ea8966 *db = to_d53e6ea8966(panel);
const struct d53e6ea8966_panel_info *panel_info = db->panel_info;
struct drm_display_mode *mode;
static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
unsigned int i;
for (i = 0; i < panel_info->num_modes; i++) {
mode = drm_mode_duplicate(connector->dev,
&panel_info->display_modes[i]);
if (!mode)
return -ENOMEM;
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
}
connector->display_info.bpc = 8;
connector->display_info.width_mm = panel_info->width_mm;
connector->display_info.height_mm = panel_info->height_mm;
connector->display_info.bus_flags = panel_info->bus_flags;
drm_display_info_set_bus_formats(&connector->display_info,
&bus_format, 1);
return 1;
}
static const struct drm_panel_funcs d53e6ea8966_panel_funcs = {
.disable = d53e6ea8966_disable,
.enable = d53e6ea8966_enable,
.get_modes = d53e6ea8966_get_modes,
.prepare = d53e6ea8966_prepare,
.unprepare = d53e6ea8966_unprepare,
};
static int ams495qa01_set_brightness(struct backlight_device *bd)
{
struct d53e6ea8966 *db = bl_get_data(bd);
struct mipi_dbi *dbi = &db->dbi;
int brightness = backlight_get_brightness(bd);
ams495qa01_update_gamma(dbi, brightness);
return 0;
}
static const struct backlight_ops ams495qa01_backlight_ops = {
.update_status = ams495qa01_set_brightness,
};
static int ams495qa01_backlight_register(struct d53e6ea8966 *db)
{
struct backlight_properties props = {
.type = BACKLIGHT_RAW,
.brightness = MAX_BRIGHTNESS,
.max_brightness = MAX_BRIGHTNESS,
};
struct device *dev = db->dev;
int ret = 0;
db->bl_dev = devm_backlight_device_register(dev, "panel", dev, db,
&ams495qa01_backlight_ops,
&props);
if (IS_ERR(db->bl_dev)) {
ret = PTR_ERR(db->bl_dev);
dev_err(dev, "error registering backlight device (%d)\n", ret);
}
return ret;
}
static int d53e6ea8966_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct mipi_dsi_host *dsi_host;
struct d53e6ea8966 *db;
int ret;
struct mipi_dsi_device_info info = {
.type = "d53e6ea8966",
.channel = 0,
.node = NULL,
};
db = devm_kzalloc(dev, sizeof(*db), GFP_KERNEL);
if (!db)
return -ENOMEM;
spi_set_drvdata(spi, db);
db->dev = dev;
db->panel_info = of_device_get_match_data(dev);
if (!db->panel_info)
return -EINVAL;
db->reg_vdd = devm_regulator_get(dev, "vdd");
if (IS_ERR(db->reg_vdd))
return dev_err_probe(dev, PTR_ERR(db->reg_vdd),
"Failed to get vdd supply\n");
db->reg_elvdd = devm_regulator_get_optional(dev, "elvdd");
if (IS_ERR(db->reg_elvdd))
db->reg_elvdd = NULL;
db->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(db->reset)) {
ret = PTR_ERR(db->reset);
return dev_err_probe(dev, ret, "no RESET GPIO\n");
}
db->enable = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(db->enable)) {
ret = PTR_ERR(db->enable);
return dev_err_probe(dev, ret, "cannot get ENABLE GPIO\n");
}
ret = mipi_dbi_spi_init(spi, &db->dbi, NULL);
if (ret)
return dev_err_probe(dev, ret, "MIPI DBI init failed\n");
dsi_host = drm_of_get_dsi_bus(dev);
if (IS_ERR(dsi_host)) {
ret = PTR_ERR(dsi_host);
return dev_err_probe(dev, ret, "Error attaching DSI bus\n");
}
db->dsi_dev = devm_mipi_dsi_device_register_full(dev, dsi_host, &info);
if (IS_ERR(db->dsi_dev))
return dev_err_probe(dev, PTR_ERR(db->dsi_dev),
"failed to register dsi device\n");
db->dsi_dev->lanes = 2;
db->dsi_dev->format = MIPI_DSI_FMT_RGB888;
db->dsi_dev->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
drm_panel_init(&db->panel, dev, &d53e6ea8966_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
if (db->panel_info->backlight_register) {
ret = db->panel_info->backlight_register(db);
if (ret < 0)
return ret;
db->panel.backlight = db->bl_dev;
}
drm_panel_add(&db->panel);
ret = devm_mipi_dsi_attach(dev, db->dsi_dev);
if (ret < 0) {
dev_err(dev, "mipi_dsi_attach failed: %d\n", ret);
drm_panel_remove(&db->panel);
return ret;
}
return 0;
}
static void d53e6ea8966_remove(struct spi_device *spi)
{
struct d53e6ea8966 *db = spi_get_drvdata(spi);
drm_panel_remove(&db->panel);
}
static const struct drm_display_mode ams495qa01_modes[] = {
{ /* 60 Hz */
.clock = 33500,
.hdisplay = 960,
.hsync_start = 960 + 10,
.hsync_end = 960 + 10 + 2,
.htotal = 960 + 10 + 2 + 10,
.vdisplay = 544,
.vsync_start = 544 + 10,
.vsync_end = 544 + 10 + 2,
.vtotal = 544 + 10 + 2 + 10,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
},
{ /* 50 Hz */
.clock = 27800,
.hdisplay = 960,
.hsync_start = 960 + 10,
.hsync_end = 960 + 10 + 2,
.htotal = 960 + 10 + 2 + 10,
.vdisplay = 544,
.vsync_start = 544 + 10,
.vsync_end = 544 + 10 + 2,
.vtotal = 544 + 10 + 2 + 10,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
.type = DRM_MODE_TYPE_DRIVER,
},
};
static const struct d53e6ea8966_panel_info ams495qa01_info = {
.display_modes = ams495qa01_modes,
.num_modes = ARRAY_SIZE(ams495qa01_modes),
.width_mm = 117,
.height_mm = 74,
.bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
.panel_init_seq = ams495qa01_panel_init,
.backlight_register = ams495qa01_backlight_register,
};
static const struct of_device_id d53e6ea8966_match[] = {
{ .compatible = "samsung,ams495qa01", .data = &ams495qa01_info },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, d53e6ea8966_match);
static const struct spi_device_id d53e6ea8966_ids[] = {
{ "ams495qa01", 0 },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, d53e6ea8966_ids);
static struct spi_driver d53e6ea8966_driver = {
.driver = {
.name = "d53e6ea8966-panel",
.of_match_table = d53e6ea8966_match,
},
.id_table = d53e6ea8966_ids,
.probe = d53e6ea8966_probe,
.remove = d53e6ea8966_remove,
};
module_spi_driver(d53e6ea8966_driver);
MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
MODULE_DESCRIPTION("Magnachip d53e6ea8966 panel driver");
MODULE_LICENSE("GPL");

View File

@@ -220,15 +220,8 @@ panfrost_copy_in_sync(struct drm_device *dev,
}
for (i = 0; i < in_fence_count; i++) {
struct dma_fence *fence;
ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
&fence);
if (ret)
goto fail;
ret = drm_sched_job_add_dependency(&job->base, fence);
ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv,
handles[i], 0);
if (ret)
goto fail;
}

View File

@@ -143,6 +143,17 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *old_mem = bo->resource;
int ret;
if (!old_mem) {
if (new_mem->mem_type != TTM_PL_SYSTEM) {
hop->mem_type = TTM_PL_SYSTEM;
hop->flags = TTM_PL_FLAG_TEMPORARY;
return -EMULTIHOP;
}
ttm_bo_move_null(bo, new_mem);
return 0;
}
qxl_bo_move_notify(bo, new_mem);
ret = ttm_bo_wait_ctx(bo, ctx);

View File

@@ -8,6 +8,7 @@ config DRM_RADEON
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_SUBALLOC_HELPER
select DRM_TTM
select DRM_TTM_HELPER
select SND_HDA_COMPONENT if SND_HDA_CORE

View File

@@ -79,6 +79,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_suballoc.h>
#include "radeon_family.h"
#include "radeon_mode.h"
@@ -511,52 +512,12 @@ struct radeon_bo {
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base)
/* Sub-allocation manager; it has to be protected by another lock.
* By design this is a helper for other parts of the driver, such as
* the indirect buffer or semaphore, which both have their own
* locking.
*
* The principle is simple: we keep a list of sub-allocations in
* offset order (the first entry has offset == 0, the last entry has
* the highest offset).
*
* When allocating a new object we first check if there is room at
* the end: total_size - (last_object_offset + last_object_size) >=
* alloc_size. If so, we allocate the new object there.
*
* When there is not enough room at the end, we wait on each
* sub-object's fence until we reach object_offset + object_size >=
* alloc_size; that object then becomes the sub-object we return.
*
* Alignment can't be bigger than the page size.
*
* Holes are not considered for allocation, to keep things simple.
* The assumption is that there won't be holes (all objects use the
* same alignment).
*/
struct radeon_sa_manager {
wait_queue_head_t wq;
struct radeon_bo *bo;
struct list_head *hole;
struct list_head flist[RADEON_NUM_RINGS];
struct list_head olist;
unsigned size;
uint64_t gpu_addr;
void *cpu_ptr;
uint32_t domain;
uint32_t align;
};
struct radeon_sa_bo;
/* sub-allocation buffer */
struct radeon_sa_bo {
struct list_head olist;
struct list_head flist;
struct radeon_sa_manager *manager;
unsigned soffset;
unsigned eoffset;
struct radeon_fence *fence;
struct drm_suballoc_manager base;
struct radeon_bo *bo;
uint64_t gpu_addr;
void *cpu_ptr;
u32 domain;
};
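As an aside, the "room at the end" test that the comment above describes is only a few lines. A minimal illustrative sketch (the function name and parameters are hypothetical, mirroring the radeon_sa_bo_try_alloc() hunk further below rather than adding any new API):

/*
 * Hypothetical sketch of the end-of-buffer check: compute the bytes
 * wasted to reach the requested alignment, then see whether the hole
 * between soffset and eoffset still holds the allocation.
 */
static bool example_sa_fits(unsigned int soffset, unsigned int eoffset,
			    unsigned int size, unsigned int align)
{
	unsigned int wasted = (align - (soffset % align)) % align;

	return (eoffset - soffset) >= (size + wasted);
}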
/*
@@ -587,7 +548,7 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
* Semaphores.
*/
struct radeon_semaphore {
struct radeon_sa_bo *sa_bo;
struct drm_suballoc *sa_bo;
signed waiters;
uint64_t gpu_addr;
};
@@ -816,7 +777,7 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
*/
struct radeon_ib {
struct radeon_sa_bo *sa_bo;
struct drm_suballoc *sa_bo;
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;

View File

@@ -61,7 +61,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
{
int r;
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
r = radeon_sa_bo_new(&rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
if (r) {
dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
return r;
@@ -77,7 +77,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
* space and soffset is the offset inside the pool bo
*/
ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
ib->gpu_addr = drm_suballoc_soffset(ib->sa_bo) + RADEON_VA_IB_OFFSET;
} else {
ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
}
@@ -97,7 +97,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
radeon_sync_free(rdev, &ib->sync, ib->fence);
radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
radeon_sa_bo_free(&ib->sa_bo, ib->fence);
radeon_fence_unref(&ib->fence);
}
@@ -201,8 +201,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
if (rdev->family >= CHIP_BONAIRE) {
r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
RADEON_IB_POOL_SIZE*64*1024,
RADEON_GPU_PAGE_SIZE,
RADEON_IB_POOL_SIZE*64*1024, 256,
RADEON_GEM_DOMAIN_GTT,
RADEON_GEM_GTT_WC);
} else {
@@ -210,8 +209,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
* to the command stream checking
*/
r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
RADEON_IB_POOL_SIZE*64*1024,
RADEON_GPU_PAGE_SIZE,
RADEON_IB_POOL_SIZE*64*1024, 256,
RADEON_GEM_DOMAIN_GTT, 0);
}
if (r) {

View File

@@ -169,15 +169,22 @@ extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
/*
* sub allocation
*/
static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
static inline struct radeon_sa_manager *
to_radeon_sa_manager(struct drm_suballoc_manager *manager)
{
return sa_bo->manager->gpu_addr + sa_bo->soffset;
return container_of(manager, struct radeon_sa_manager, base);
}
static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
static inline uint64_t radeon_sa_bo_gpu_addr(struct drm_suballoc *sa_bo)
{
return sa_bo->manager->cpu_ptr + sa_bo->soffset;
return to_radeon_sa_manager(sa_bo->manager)->gpu_addr +
drm_suballoc_soffset(sa_bo);
}
static inline void *radeon_sa_bo_cpu_addr(struct drm_suballoc *sa_bo)
{
return to_radeon_sa_manager(sa_bo->manager)->cpu_ptr +
drm_suballoc_soffset(sa_bo);
}
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
@@ -190,12 +197,10 @@ extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo **sa_bo,
unsigned size, unsigned align);
extern void radeon_sa_bo_free(struct radeon_device *rdev,
struct radeon_sa_bo **sa_bo,
extern int radeon_sa_bo_new(struct radeon_sa_manager *sa_manager,
struct drm_suballoc **sa_bo,
unsigned int size, unsigned int align);
extern void radeon_sa_bo_free(struct drm_suballoc **sa_bo,
struct radeon_fence *fence);
#if defined(CONFIG_DEBUG_FS)
extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,

View File

@@ -44,53 +44,32 @@
#include "radeon.h"
static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
unsigned size, u32 align, u32 domain, u32 flags)
unsigned int size, u32 sa_align, u32 domain,
u32 flags)
{
int i, r;
int r;
init_waitqueue_head(&sa_manager->wq);
sa_manager->bo = NULL;
sa_manager->size = size;
sa_manager->domain = domain;
sa_manager->align = align;
sa_manager->hole = &sa_manager->olist;
INIT_LIST_HEAD(&sa_manager->olist);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
INIT_LIST_HEAD(&sa_manager->flist[i]);
}
r = radeon_bo_create(rdev, size, align, true,
r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
domain, flags, NULL, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
}
sa_manager->domain = domain;
drm_suballoc_manager_init(&sa_manager->base, size, sa_align);
return r;
}
void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager)
{
struct radeon_sa_bo *sa_bo, *tmp;
if (!list_empty(&sa_manager->olist)) {
sa_manager->hole = &sa_manager->olist;
radeon_sa_bo_try_free(sa_manager);
if (!list_empty(&sa_manager->olist)) {
dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
}
}
list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
radeon_sa_bo_remove_locked(sa_bo);
}
drm_suballoc_manager_fini(&sa_manager->base);
radeon_bo_unref(&sa_manager->bo);
sa_manager->size = 0;
}
int radeon_sa_bo_manager_start(struct radeon_device *rdev,
@@ -139,260 +118,34 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
return r;
}
static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
int radeon_sa_bo_new(struct radeon_sa_manager *sa_manager,
struct drm_suballoc **sa_bo,
unsigned int size, unsigned int align)
{
struct radeon_sa_manager *sa_manager = sa_bo->manager;
if (sa_manager->hole == &sa_bo->olist) {
sa_manager->hole = sa_bo->olist.prev;
struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
GFP_KERNEL, true, align);
if (IS_ERR(sa)) {
*sa_bo = NULL;
return PTR_ERR(sa);
}
list_del_init(&sa_bo->olist);
list_del_init(&sa_bo->flist);
radeon_fence_unref(&sa_bo->fence);
kfree(sa_bo);
}
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
{
struct radeon_sa_bo *sa_bo, *tmp;
if (sa_manager->hole->next == &sa_manager->olist)
return;
sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
return;
}
radeon_sa_bo_remove_locked(sa_bo);
}
}
static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
struct list_head *hole = sa_manager->hole;
if (hole != &sa_manager->olist) {
return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
}
*sa_bo = sa;
return 0;
}
static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
struct list_head *hole = sa_manager->hole;
if (hole->next != &sa_manager->olist) {
return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
}
return sa_manager->size;
}
static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo *sa_bo,
unsigned size, unsigned align)
{
unsigned soffset, eoffset, wasted;
soffset = radeon_sa_bo_hole_soffset(sa_manager);
eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
wasted = (align - (soffset % align)) % align;
if ((eoffset - soffset) >= (size + wasted)) {
soffset += wasted;
sa_bo->manager = sa_manager;
sa_bo->soffset = soffset;
sa_bo->eoffset = soffset + size;
list_add(&sa_bo->olist, sa_manager->hole);
INIT_LIST_HEAD(&sa_bo->flist);
sa_manager->hole = &sa_bo->olist;
return true;
}
return false;
}
/**
* radeon_sa_event - Check if we can stop waiting
*
* @sa_manager: pointer to the sa_manager
* @size: number of bytes we want to allocate
* @align: alignment we need to match
*
* Check if either there is a fence we can wait for or
* enough free memory to satisfy the allocation directly
*/
static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
unsigned size, unsigned align)
{
unsigned soffset, eoffset, wasted;
int i;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!list_empty(&sa_manager->flist[i])) {
return true;
}
}
soffset = radeon_sa_bo_hole_soffset(sa_manager);
eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
wasted = (align - (soffset % align)) % align;
if ((eoffset - soffset) >= (size + wasted)) {
return true;
}
return false;
}
static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
struct radeon_fence **fences,
unsigned *tries)
{
struct radeon_sa_bo *best_bo = NULL;
unsigned i, soffset, best, tmp;
/* if hole points to the end of the buffer */
if (sa_manager->hole->next == &sa_manager->olist) {
/* try again with its beginning */
sa_manager->hole = &sa_manager->olist;
return true;
}
soffset = radeon_sa_bo_hole_soffset(sa_manager);
/* to handle wrap around we add sa_manager->size */
best = sa_manager->size * 2;
/* go over all fence lists and try to find the sa_bo
* closest to the current hole
*/
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
struct radeon_sa_bo *sa_bo;
fences[i] = NULL;
if (list_empty(&sa_manager->flist[i])) {
continue;
}
sa_bo = list_first_entry(&sa_manager->flist[i],
struct radeon_sa_bo, flist);
if (!radeon_fence_signaled(sa_bo->fence)) {
fences[i] = sa_bo->fence;
continue;
}
/* limit the number of tries each ring gets */
if (tries[i] > 2) {
continue;
}
tmp = sa_bo->soffset;
if (tmp < soffset) {
/* wrap around, pretend it's after */
tmp += sa_manager->size;
}
tmp -= soffset;
if (tmp < best) {
/* this sa bo is the closest one */
best = tmp;
best_bo = sa_bo;
}
}
if (best_bo) {
++tries[best_bo->fence->ring];
sa_manager->hole = best_bo->olist.prev;
/* we knew that this one is signaled,
* so it's safe to remove it */
radeon_sa_bo_remove_locked(best_bo);
return true;
}
return false;
}
int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo **sa_bo,
unsigned size, unsigned align)
{
struct radeon_fence *fences[RADEON_NUM_RINGS];
unsigned tries[RADEON_NUM_RINGS];
int i, r;
BUG_ON(align > sa_manager->align);
BUG_ON(size > sa_manager->size);
*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
if ((*sa_bo) == NULL) {
return -ENOMEM;
}
(*sa_bo)->manager = sa_manager;
(*sa_bo)->fence = NULL;
INIT_LIST_HEAD(&(*sa_bo)->olist);
INIT_LIST_HEAD(&(*sa_bo)->flist);
spin_lock(&sa_manager->wq.lock);
do {
for (i = 0; i < RADEON_NUM_RINGS; ++i)
tries[i] = 0;
do {
radeon_sa_bo_try_free(sa_manager);
if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
size, align)) {
spin_unlock(&sa_manager->wq.lock);
return 0;
}
/* see if we can skip over some allocations */
} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
for (i = 0; i < RADEON_NUM_RINGS; ++i)
radeon_fence_ref(fences[i]);
spin_unlock(&sa_manager->wq.lock);
r = radeon_fence_wait_any(rdev, fences, false);
for (i = 0; i < RADEON_NUM_RINGS; ++i)
radeon_fence_unref(&fences[i]);
spin_lock(&sa_manager->wq.lock);
/* if we have nothing to wait for, block */
if (r == -ENOENT) {
r = wait_event_interruptible_locked(
sa_manager->wq,
radeon_sa_event(sa_manager, size, align)
);
}
} while (!r);
spin_unlock(&sa_manager->wq.lock);
kfree(*sa_bo);
*sa_bo = NULL;
return r;
}
void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
void radeon_sa_bo_free(struct drm_suballoc **sa_bo,
struct radeon_fence *fence)
{
struct radeon_sa_manager *sa_manager;
if (sa_bo == NULL || *sa_bo == NULL) {
return;
}
sa_manager = (*sa_bo)->manager;
spin_lock(&sa_manager->wq.lock);
if (fence && !radeon_fence_signaled(fence)) {
(*sa_bo)->fence = radeon_fence_ref(fence);
list_add_tail(&(*sa_bo)->flist,
&sa_manager->flist[fence->ring]);
} else {
radeon_sa_bo_remove_locked(*sa_bo);
}
wake_up_all_locked(&sa_manager->wq);
spin_unlock(&sa_manager->wq.lock);
if (fence)
drm_suballoc_free(*sa_bo, &fence->base);
else
drm_suballoc_free(*sa_bo, NULL);
*sa_bo = NULL;
}
@@ -400,25 +153,8 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
struct seq_file *m)
{
struct radeon_sa_bo *i;
struct drm_printer p = drm_seq_file_printer(m);
spin_lock(&sa_manager->wq.lock);
list_for_each_entry(i, &sa_manager->olist, olist) {
uint64_t soffset = i->soffset + sa_manager->gpu_addr;
uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
if (&i->olist == sa_manager->hole) {
seq_printf(m, ">");
} else {
seq_printf(m, " ");
}
seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
soffset, eoffset, eoffset - soffset);
if (i->fence) {
seq_printf(m, " protected by 0x%016llx on ring %d",
i->fence->seq, i->fence->ring);
}
seq_printf(m, "\n");
}
spin_unlock(&sa_manager->wq.lock);
drm_suballoc_dump_debug_info(&sa_manager->base, &p, sa_manager->gpu_addr);
}
#endif

View File

@@ -40,7 +40,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
if (*semaphore == NULL) {
return -ENOMEM;
}
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
r = radeon_sa_bo_new(&rdev->ring_tmp_bo,
&(*semaphore)->sa_bo, 8, 8);
if (r) {
kfree(*semaphore);
@@ -100,7 +100,7 @@ void radeon_semaphore_free(struct radeon_device *rdev,
dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
" hardware lockup imminent!\n", *semaphore);
}
radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence);
radeon_sa_bo_free(&(*semaphore)->sa_bo, fence);
kfree(*semaphore);
*semaphore = NULL;
}

View File

@@ -211,13 +211,10 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
if (r)
return r;
/* Can't move a pinned BO */
rbo = container_of(bo, struct radeon_bo, tbo);
if (WARN_ON_ONCE(rbo->tbo.pin_count > 0))
return -EINVAL;
rdev = radeon_get_rdev(bo->bdev);
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
bo->ttm == NULL)) {
ttm_bo_move_null(bo, new_mem);
goto out;
}

View File

@@ -261,9 +261,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
else
ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
if (ret)
drm_gem_vm_close(vma);
return ret;
}
@@ -518,8 +515,14 @@ int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
if (rk_obj->pages) {
void *vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
void *vaddr;
if (rk_obj->kvaddr)
vaddr = rk_obj->kvaddr;
else
vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
if (!vaddr)
return -ENOMEM;
iosys_map_set_vaddr(map, vaddr);
@@ -539,7 +542,8 @@ void rockchip_gem_prime_vunmap(struct drm_gem_object *obj,
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
if (rk_obj->pages) {
vunmap(map->vaddr);
if (map->vaddr != rk_obj->kvaddr)
vunmap(map->vaddr);
return;
}

View File

@@ -316,13 +316,10 @@ static int vop_convert_afbc_format(uint32_t format)
case DRM_FORMAT_RGB565:
case DRM_FORMAT_BGR565:
return AFBC_FMT_RGB565;
/* neither of the below should be reachable */
default:
DRM_WARN_ONCE("unsupported AFBC format[%08x]\n", format);
DRM_DEBUG_KMS("unsupported AFBC format[%08x]\n", format);
return -EINVAL;
}
return -EINVAL;
}
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
@@ -2221,7 +2218,7 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
goto err_disable_pm_runtime;
if (vop->data->feature & VOP_FEATURE_INTERNAL_RGB) {
vop->rgb = rockchip_rgb_init(dev, &vop->crtc, vop->drm_dev);
vop->rgb = rockchip_rgb_init(dev, &vop->crtc, vop->drm_dev, 0);
if (IS_ERR(vop->rgb)) {
ret = PTR_ERR(vop->rgb);
goto err_disable_pm_runtime;

View File

@@ -38,6 +38,7 @@
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_vop2.h"
#include "rockchip_rgb.h"
/*
* VOP2 architecture
@@ -211,6 +212,9 @@ struct vop2 {
struct clk *hclk;
struct clk *aclk;
/* optional internal rgb encoder */
struct rockchip_rgb *rgb;
/* must be put at the end of the struct */
struct vop2_win win[];
};
@@ -2245,7 +2249,7 @@ static struct vop2_video_port *find_vp_without_primary(struct vop2 *vop2)
#define NR_LAYERS 6
static int vop2_create_crtc(struct vop2 *vop2)
static int vop2_create_crtcs(struct vop2 *vop2)
{
const struct vop2_data *vop2_data = vop2->data;
struct drm_device *drm = vop2->drm;
@@ -2321,10 +2325,11 @@ static int vop2_create_crtc(struct vop2 *vop2)
/* change the unused primary window to overlay window */
win->type = DRM_PLANE_TYPE_OVERLAY;
}
}
if (win->type == DRM_PLANE_TYPE_OVERLAY)
} else if (win->type == DRM_PLANE_TYPE_OVERLAY) {
possible_crtcs = (1 << nvps) - 1;
} else {
possible_crtcs = 0;
}
ret = vop2_plane_init(vop2, win, possible_crtcs);
if (ret) {
@@ -2370,15 +2375,44 @@ static int vop2_create_crtc(struct vop2 *vop2)
return 0;
}
static void vop2_destroy_crtc(struct drm_crtc *crtc)
static void vop2_destroy_crtcs(struct vop2 *vop2)
{
of_node_put(crtc->port);
struct drm_device *drm = vop2->drm;
struct list_head *crtc_list = &drm->mode_config.crtc_list;
struct list_head *plane_list = &drm->mode_config.plane_list;
struct drm_crtc *crtc, *tmpc;
struct drm_plane *plane, *tmpp;
list_for_each_entry_safe(plane, tmpp, plane_list, head)
drm_plane_cleanup(plane);
/*
* Destroy CRTC after vop2_plane_destroy() since vop2_disable_plane()
* references the CRTC.
*/
drm_crtc_cleanup(crtc);
list_for_each_entry_safe(crtc, tmpc, crtc_list, head) {
of_node_put(crtc->port);
drm_crtc_cleanup(crtc);
}
}
static int vop2_find_rgb_encoder(struct vop2 *vop2)
{
struct device_node *node = vop2->dev->of_node;
struct device_node *endpoint;
int i;
for (i = 0; i < vop2->data->nr_vps; i++) {
endpoint = of_graph_get_endpoint_by_regs(node, i,
ROCKCHIP_VOP2_EP_RGB0);
if (!endpoint)
continue;
of_node_put(endpoint);
return i;
}
return -ENOENT;
}
static struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = {
@@ -2682,33 +2716,45 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
ret = vop2_create_crtc(vop2);
ret = vop2_create_crtcs(vop2);
if (ret)
return ret;
ret = vop2_find_rgb_encoder(vop2);
if (ret >= 0) {
vop2->rgb = rockchip_rgb_init(dev, &vop2->vps[ret].crtc,
vop2->drm, ret);
if (IS_ERR(vop2->rgb)) {
if (PTR_ERR(vop2->rgb) == -EPROBE_DEFER) {
ret = PTR_ERR(vop2->rgb);
goto err_crtcs;
}
vop2->rgb = NULL;
}
}
rockchip_drm_dma_init_device(vop2->drm, vop2->dev);
pm_runtime_enable(&pdev->dev);
return 0;
err_crtcs:
vop2_destroy_crtcs(vop2);
return ret;
}
static void vop2_unbind(struct device *dev, struct device *master, void *data)
{
struct vop2 *vop2 = dev_get_drvdata(dev);
struct drm_device *drm = vop2->drm;
struct list_head *plane_list = &drm->mode_config.plane_list;
struct list_head *crtc_list = &drm->mode_config.crtc_list;
struct drm_crtc *crtc, *tmpc;
struct drm_plane *plane, *tmpp;
pm_runtime_disable(dev);
list_for_each_entry_safe(plane, tmpp, plane_list, head)
drm_plane_cleanup(plane);
if (vop2->rgb)
rockchip_rgb_fini(vop2->rgb);
list_for_each_entry_safe(crtc, tmpc, crtc_list, head)
vop2_destroy_crtc(crtc);
vop2_destroy_crtcs(vop2);
}
const struct component_ops vop2_component_ops = {

View File

@@ -22,13 +22,11 @@
#include "rockchip_drm_vop.h"
#include "rockchip_rgb.h"
#define encoder_to_rgb(c) container_of(c, struct rockchip_rgb, encoder)
struct rockchip_rgb {
struct device *dev;
struct drm_device *drm_dev;
struct drm_bridge *bridge;
struct drm_encoder encoder;
struct rockchip_encoder encoder;
struct drm_connector connector;
int output_mode;
};
@@ -74,7 +72,8 @@ struct drm_encoder_helper_funcs rockchip_rgb_encoder_helper_funcs = {
struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
struct drm_crtc *crtc,
struct drm_device *drm_dev)
struct drm_device *drm_dev,
int video_port)
{
struct rockchip_rgb *rgb;
struct drm_encoder *encoder;
@@ -92,7 +91,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
rgb->dev = dev;
rgb->drm_dev = drm_dev;
port = of_graph_get_port_by_id(dev->of_node, 0);
port = of_graph_get_port_by_id(dev->of_node, video_port);
if (!port)
return ERR_PTR(-EINVAL);
@@ -105,8 +104,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
continue;
child_count++;
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id,
&panel, &bridge);
ret = drm_of_find_panel_or_bridge(dev->of_node, video_port,
endpoint_id, &panel, &bridge);
if (!ret) {
of_node_put(endpoint);
break;
@@ -125,7 +124,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
return ERR_PTR(ret);
}
encoder = &rgb->encoder;
encoder = &rgb->encoder.encoder;
encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_NONE);
@@ -161,6 +160,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
goto err_free_encoder;
}
rgb->encoder.crtc_endpoint_id = endpoint_id;
ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) {
DRM_DEV_ERROR(drm_dev->dev,
@@ -182,6 +183,6 @@ void rockchip_rgb_fini(struct rockchip_rgb *rgb)
{
drm_panel_bridge_remove(rgb->bridge);
drm_connector_cleanup(&rgb->connector);
drm_encoder_cleanup(&rgb->encoder);
drm_encoder_cleanup(&rgb->encoder.encoder);
}
EXPORT_SYMBOL_GPL(rockchip_rgb_fini);

View File

@@ -8,12 +8,14 @@
#ifdef CONFIG_ROCKCHIP_RGB
struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
struct drm_crtc *crtc,
struct drm_device *drm_dev);
struct drm_device *drm_dev,
int video_port);
void rockchip_rgb_fini(struct rockchip_rgb *rgb);
#else
static inline struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
struct drm_crtc *crtc,
struct drm_device *drm_dev)
struct drm_device *drm_dev,
int video_port)
{
return NULL;
}

View File

@@ -53,6 +53,7 @@
#include <drm/drm_print.h>
#include <drm/drm_gem.h>
#include <drm/drm_syncobj.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>
@@ -718,6 +719,34 @@ int drm_sched_job_add_dependency(struct drm_sched_job *job,
}
EXPORT_SYMBOL(drm_sched_job_add_dependency);
/**
* drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
* @job: scheduler job to add the dependencies to
* @file: drm file private pointer
* @handle: syncobj handle to lookup
* @point: timeline point
*
* This adds the fence matching the given syncobj to @job.
*
* Returns:
* 0 on success, or an error on failing to expand the array.
*/
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
struct drm_file *file,
u32 handle,
u32 point)
{
struct dma_fence *fence;
int ret;
ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
if (ret)
return ret;
return drm_sched_job_add_dependency(job, fence);
}
EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
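For reference, a caller-side sketch of the new helper, mirroring the panfrost and v3d conversions above (example_add_in_syncs and its parameters are illustrative names, not an in-tree function):

/*
 * Illustrative submit-path loop: attach each incoming syncobj's fence
 * to the scheduler job, replacing the open-coded
 * drm_syncobj_find_fence() + drm_sched_job_add_dependency() pair.
 */
static int example_add_in_syncs(struct drm_sched_job *job,
				struct drm_file *file_priv,
				const u32 *handles, u32 count)
{
	u32 i;
	int ret;

	for (i = 0; i < count; i++) {
		ret = drm_sched_job_add_syncobj_dependency(job, file_priv,
							   handles[i], 0);
		if (ret)
			return ret;
	}

	return 0;
}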
/**
* drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
* @job: scheduler job to add the dependencies to

View File

@@ -597,7 +597,7 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
drm_fb_xrgb8888_to_xrgb1555(&dst, &result->dst_pitch, &src, &fb, &params->clip);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
@@ -628,7 +628,7 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
drm_fb_xrgb8888_to_argb1555(&dst, &result->dst_pitch, &src, &fb, &params->clip);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
@@ -659,7 +659,7 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
drm_fb_xrgb8888_to_rgba5551(&dst, &result->dst_pitch, &src, &fb, &params->clip);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
@@ -724,7 +724,7 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
drm_fb_xrgb8888_to_argb8888(&dst, &result->dst_pitch, &src, &fb, &params->clip);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
@@ -786,7 +786,7 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
drm_fb_xrgb8888_to_argb2101010(&dst, &result->dst_pitch, &src, &fb, &params->clip);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static struct kunit_case drm_format_helper_test_cases[] = {

View File

@@ -1985,9 +1985,9 @@ dma_addr_t dispc_plane_state_p_uv_addr(const struct drm_plane_state *state)
(y * fb->pitches[1] / fb->format->vsub);
}
int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
const struct drm_plane_state *state,
u32 hw_videoport)
void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
const struct drm_plane_state *state,
u32 hw_videoport)
{
bool lite = dispc->feat->vid_lite[hw_plane];
u32 fourcc = state->fb->format->format;
@@ -2066,15 +2066,11 @@ int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
else
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
28, 28);
return 0;
}
int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable)
void dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable)
{
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 0, 0);
return 0;
}
static u32 dispc_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane)

View File

@@ -123,10 +123,10 @@ int dispc_runtime_resume(struct dispc_device *dispc);
int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
const struct drm_plane_state *state,
u32 hw_videoport);
int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
const struct drm_plane_state *state,
u32 hw_videoport);
int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable);
void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
const struct drm_plane_state *state,
u32 hw_videoport);
void dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable);
const u32 *dispc_plane_formats(struct dispc_device *dispc, unsigned int *len);
int dispc_init(struct tidss_device *tidss);

View File

@@ -113,7 +113,6 @@ static void tidss_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
u32 hw_videoport;
int ret;
dev_dbg(ddev->dev, "%s\n", __func__);
@@ -124,15 +123,17 @@ static void tidss_plane_atomic_update(struct drm_plane *plane,
hw_videoport = to_tidss_crtc(new_state->crtc)->hw_videoport;
ret = dispc_plane_setup(tidss->dispc, tplane->hw_plane_id,
new_state, hw_videoport);
dispc_plane_setup(tidss->dispc, tplane->hw_plane_id, new_state, hw_videoport);
}
if (ret) {
dev_err(plane->dev->dev, "%s: Failed to setup plane %d\n",
__func__, tplane->hw_plane_id);
dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
return;
}
static void tidss_plane_atomic_enable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_device *ddev = plane->dev;
struct tidss_device *tidss = to_tidss(ddev);
struct tidss_plane *tplane = to_tidss_plane(plane);
dev_dbg(ddev->dev, "%s\n", __func__);
dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, true);
}
@@ -160,6 +161,7 @@ static void drm_plane_destroy(struct drm_plane *plane)
static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = {
.atomic_check = tidss_plane_atomic_check,
.atomic_update = tidss_plane_atomic_update,
.atomic_enable = tidss_plane_atomic_enable,
.atomic_disable = tidss_plane_atomic_disable,
};

View File

@@ -606,16 +606,12 @@ static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = {
*/
static struct drm_display_mode simpledrm_mode(unsigned int width,
unsigned int height)
unsigned int height,
unsigned int width_mm,
unsigned int height_mm)
{
/*
* Assume a monitor resolution of 96 dpi to
* get a somewhat reasonable screen size.
*/
const struct drm_display_mode mode = {
DRM_MODE_INIT(60, width, height,
DRM_MODE_RES_MM(width, 96ul),
DRM_MODE_RES_MM(height, 96ul))
DRM_MODE_INIT(60, width, height, width_mm, height_mm)
};
return mode;
@@ -629,6 +625,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
struct simpledrm_device *sdev;
struct drm_device *dev;
int width, height, stride;
int width_mm = 0, height_mm = 0;
struct device_node *panel_node;
const struct drm_format_info *format;
struct resource *res, *mem = NULL;
struct drm_plane *primary_plane;
@@ -685,6 +683,12 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
mem = simplefb_get_memory_of(dev, of_node);
if (IS_ERR(mem))
return ERR_CAST(mem);
panel_node = of_parse_phandle(of_node, "panel", 0);
if (panel_node) {
simplefb_read_u32_of(dev, panel_node, "width-mm", &width_mm);
simplefb_read_u32_of(dev, panel_node, "height-mm", &height_mm);
of_node_put(panel_node);
}
} else {
drm_err(dev, "no simplefb configuration found\n");
return ERR_PTR(-ENODEV);
@@ -695,7 +699,16 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
return ERR_PTR(-EINVAL);
}
sdev->mode = simpledrm_mode(width, height);
/*
* If physical dimensions are not specified, assume a monitor
* resolution of 96 dpi to get a somewhat reasonable screen size.
*/
if (!width_mm)
width_mm = DRM_MODE_RES_MM(width, 96ul);
if (!height_mm)
height_mm = DRM_MODE_RES_MM(height, 96ul);
sdev->mode = simpledrm_mode(width, height, width_mm, height_mm);
sdev->format = format;
sdev->pitch = stride;
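To make the 96 dpi fallback concrete: DRM_MODE_RES_MM converts a pixel count to millimetres at a given dpi. The macro body below is an assumption written out for illustration only, not the drm_modes.h definition:

/*
 * Assumed arithmetic: millimetres = pixels * 25.4 / dpi, in integer
 * math, e.g. a 1024-pixel-wide framebuffer at 96 dpi gives
 * (1024 * 254) / (96 * 10) = 270 mm.
 */
#define EXAMPLE_RES_MM(res, dpi) (((res) * 254ul) / ((dpi) * 10ul))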

View File

@@ -120,8 +120,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
bool old_use_tt, new_use_tt;
int ret;
old_use_tt = bo->resource &&
ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;
ttm_bo_unmap_virtual(bo);
@@ -894,14 +893,18 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
if (!placement->num_placement && !placement->num_busy_placement)
return ttm_bo_pipeline_gutting(bo);
/*
* Check whether we need to move buffer.
*/
if (!bo->resource || !ttm_resource_compat(bo->resource, placement)) {
ret = ttm_bo_move_buffer(bo, placement, ctx);
if (ret)
return ret;
}
/* Check whether we need to move buffer. */
if (bo->resource && ttm_resource_compat(bo->resource, placement))
return 0;
/* Moving of pinned BOs is forbidden */
if (bo->pin_count)
return -EINVAL;
ret = ttm_bo_move_buffer(bo, placement, ctx);
if (ret)
return ret;
/*
* We might need to add a TTM.
*/
@@ -953,7 +956,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
int ret;
kref_init(&bo->kref);
@@ -970,12 +972,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
bo->base.resv = &bo->base._resv;
atomic_inc(&ttm_glob.bo_count);
ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
if (unlikely(ret)) {
ttm_bo_put(bo);
return ret;
}
/*
* For ttm_bo_type_device buffers, allocate
* address space from the device.

View File

@@ -157,8 +157,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
bool clear;
int ret = 0;
if (!src_mem)
return 0;
if (WARN_ON(!src_mem))
return -EINVAL;
src_man = ttm_manager_type(bdev, src_mem->mem_type);
if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
@@ -704,30 +704,23 @@ EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);
*/
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
struct ttm_buffer_object *ghost;
struct ttm_resource *sys_res;
struct ttm_tt *ttm;
int ret;
ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
if (ret)
return ret;
/* If already idle, no need for ghost object dance. */
if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
if (!bo->ttm) {
/* See comment below about clearing. */
ret = ttm_tt_create(bo, true);
if (ret)
goto error_free_sys_mem;
return ret;
} else {
ttm_tt_unpopulate(bo->bdev, bo->ttm);
if (bo->type == ttm_bo_type_device)
ttm_tt_mark_for_clear(bo->ttm);
}
ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, sys_res);
return 0;
}
@@ -744,7 +737,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
ret = ttm_tt_create(bo, true);
swap(bo->ttm, ttm);
if (ret)
goto error_free_sys_mem;
return ret;
ret = ttm_buffer_object_transfer(bo, &ghost);
if (ret)
@@ -760,13 +753,9 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
bo->ttm = ttm;
ttm_bo_assign_mem(bo, sys_res);
return 0;
error_destroy_tt:
ttm_tt_destroy(bo->bdev, ttm);
error_free_sys_mem:
ttm_resource_free(bo, &sys_res);
return ret;
}

View File

@@ -361,7 +361,6 @@ bool ttm_resource_compat(struct ttm_resource *res,
return false;
}
EXPORT_SYMBOL(ttm_resource_compat);
void ttm_resource_set_bo(struct ttm_resource *res,
struct ttm_buffer_object *bo)

View File

@@ -396,20 +396,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
return ret;
}
static int
v3d_job_add_deps(struct drm_file *file_priv, struct v3d_job *job,
u32 in_sync, u32 point)
{
struct dma_fence *in_fence = NULL;
int ret;
ret = drm_syncobj_find_fence(file_priv, in_sync, point, 0, &in_fence);
if (ret == -EINVAL)
return ret;
return drm_sched_job_add_dependency(&job->base, in_fence);
}
static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
void **container, size_t size, void (*free)(struct kref *ref),
@@ -447,14 +433,18 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
DRM_DEBUG("Failed to copy wait dep handle.\n");
goto fail_deps;
}
ret = v3d_job_add_deps(file_priv, job, in.handle, 0);
if (ret)
ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);
/* TODO: Investigate why this was filtered out for the IOCTL. */
if (ret && ret != -ENOENT)
goto fail_deps;
}
}
} else {
ret = v3d_job_add_deps(file_priv, job, in_sync, 0);
if (ret)
ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);
/* TODO: Investigate why this was filtered out for the IOCTL. */
if (ret && ret != -ENOENT)
goto fail_deps;
}

View File

@@ -690,7 +690,7 @@ struct vc4_exec_info {
/* This is the array of BOs that were looked up at the start of exec.
* Command validation will use indices into this array.
*/
struct drm_gem_dma_object **bo;
struct drm_gem_object **bo;
uint32_t bo_count;
/* List of BOs that are being written by the RCL. Other than

View File

@@ -199,7 +199,7 @@ vc4_save_hang_state(struct drm_device *dev)
continue;
for (j = 0; j < exec[i]->bo_count; j++) {
bo = to_vc4_bo(&exec[i]->bo[j]->base);
bo = to_vc4_bo(exec[i]->bo[j]);
/* Retain BOs just in case they were marked purgeable.
* This prevents the BO from being purged before
@@ -207,8 +207,8 @@ vc4_save_hang_state(struct drm_device *dev)
*/
WARN_ON(!refcount_read(&bo->usecnt));
refcount_inc(&bo->usecnt);
drm_gem_object_get(&exec[i]->bo[j]->base);
kernel_state->bo[k++] = &exec[i]->bo[j]->base;
drm_gem_object_get(exec[i]->bo[j]);
kernel_state->bo[k++] = exec[i]->bo[j];
}
list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
@@ -558,7 +558,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
unsigned i;
for (i = 0; i < exec->bo_count; i++) {
bo = to_vc4_bo(&exec->bo[i]->base);
bo = to_vc4_bo(exec->bo[i]);
bo->seqno = seqno;
dma_resv_add_fence(bo->base.base.resv, exec->fence,
@@ -585,11 +585,8 @@ vc4_unlock_bo_reservations(struct drm_device *dev,
{
int i;
for (i = 0; i < exec->bo_count; i++) {
struct drm_gem_object *bo = &exec->bo[i]->base;
dma_resv_unlock(bo->resv);
}
for (i = 0; i < exec->bo_count; i++)
dma_resv_unlock(exec->bo[i]->resv);
ww_acquire_fini(acquire_ctx);
}
@@ -614,7 +611,7 @@ vc4_lock_bo_reservations(struct drm_device *dev,
retry:
if (contended_lock != -1) {
bo = &exec->bo[contended_lock]->base;
bo = exec->bo[contended_lock];
ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
if (ret) {
ww_acquire_done(acquire_ctx);
@@ -626,19 +623,19 @@ vc4_lock_bo_reservations(struct drm_device *dev,
if (i == contended_lock)
continue;
bo = &exec->bo[i]->base;
bo = exec->bo[i];
ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
if (ret) {
int j;
for (j = 0; j < i; j++) {
bo = &exec->bo[j]->base;
bo = exec->bo[j];
dma_resv_unlock(bo->resv);
}
if (contended_lock != -1 && contended_lock >= i) {
bo = &exec->bo[contended_lock]->base;
bo = exec->bo[contended_lock];
dma_resv_unlock(bo->resv);
}
@@ -659,7 +656,7 @@ vc4_lock_bo_reservations(struct drm_device *dev,
* before we commit the CL to the hardware.
*/
for (i = 0; i < exec->bo_count; i++) {
bo = &exec->bo[i]->base;
bo = exec->bo[i];
ret = dma_resv_reserve_fences(bo->resv, 1);
if (ret) {
@@ -749,7 +746,6 @@ vc4_cl_lookup_bos(struct drm_device *dev,
struct vc4_exec_info *exec)
{
struct drm_vc4_submit_cl *args = exec->args;
uint32_t *handles;
int ret = 0;
int i;
@@ -763,54 +759,18 @@ vc4_cl_lookup_bos(struct drm_device *dev,
return -EINVAL;
}
exec->bo = kvmalloc_array(exec->bo_count,
sizeof(struct drm_gem_dma_object *),
GFP_KERNEL | __GFP_ZERO);
if (!exec->bo) {
DRM_ERROR("Failed to allocate validated BO pointers\n");
return -ENOMEM;
}
handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
if (!handles) {
ret = -ENOMEM;
DRM_ERROR("Failed to allocate incoming GEM handles\n");
goto fail;
}
if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
exec->bo_count * sizeof(uint32_t))) {
ret = -EFAULT;
DRM_ERROR("Failed to copy in GEM handles\n");
goto fail;
}
spin_lock(&file_priv->table_lock);
for (i = 0; i < exec->bo_count; i++) {
struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
handles[i]);
if (!bo) {
DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
i, handles[i]);
ret = -EINVAL;
break;
}
drm_gem_object_get(bo);
exec->bo[i] = (struct drm_gem_dma_object *)bo;
}
spin_unlock(&file_priv->table_lock);
ret = drm_gem_objects_lookup(file_priv, u64_to_user_ptr(args->bo_handles),
exec->bo_count, &exec->bo);
if (ret)
goto fail_put_bo;
for (i = 0; i < exec->bo_count; i++) {
ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
ret = vc4_bo_inc_usecnt(to_vc4_bo(exec->bo[i]));
if (ret)
goto fail_dec_usecnt;
}
kvfree(handles);
return 0;
fail_dec_usecnt:
@@ -823,15 +783,13 @@ vc4_cl_lookup_bos(struct drm_device *dev,
* step.
*/
for (i-- ; i >= 0; i--)
vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));
vc4_bo_dec_usecnt(to_vc4_bo(exec->bo[i]));
fail_put_bo:
/* Release any reference to acquired objects. */
for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
drm_gem_object_put(&exec->bo[i]->base);
drm_gem_object_put(exec->bo[i]);
fail:
kvfree(handles);
kvfree(exec->bo);
exec->bo = NULL;
return ret;
@@ -974,10 +932,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++) {
struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);
struct vc4_bo *bo = to_vc4_bo(exec->bo[i]);
vc4_bo_dec_usecnt(bo);
drm_gem_object_put(&exec->bo[i]->base);
drm_gem_object_put(exec->bo[i]);
}
kvfree(exec->bo);
}

View File

@@ -1466,6 +1466,12 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
if (!drm_dev_enter(drm, &idx))
goto out;
ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to retain power domain: %d\n", ret);
goto err_dev_exit;
}
/*
* As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must
* be faster than pixel clock, infinitesimally faster, tested in
@@ -1482,17 +1488,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
* Additionally, the AXI clock needs to be at least 25% of
* pixel clock, but HSM ends up being the limiting factor.
*/
hsm_rate = max_t(unsigned long, 120000000, (tmds_char_rate / 100) * 101);
hsm_rate = max_t(unsigned long,
HSM_MIN_CLOCK_FREQ,
(tmds_char_rate / 100) * 101);
ret = clk_set_min_rate(vc4_hdmi->hsm_clock, hsm_rate);
if (ret) {
DRM_ERROR("Failed to set HSM clock rate: %d\n", ret);
goto err_dev_exit;
}
ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to retain power domain: %d\n", ret);
goto err_dev_exit;
goto err_put_runtime_pm;
}
ret = clk_set_rate(vc4_hdmi->pixel_clock, tmds_char_rate);
@@ -3188,16 +3190,9 @@ static int vc4_hdmi_init_resources(struct drm_device *drm,
DRM_ERROR("Failed to get HDMI state machine clock\n");
return PTR_ERR(vc4_hdmi->hsm_clock);
}
vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock;
vc4_hdmi->cec_clock = vc4_hdmi->hsm_clock;
vc4_hdmi->hsm_rpm_clock = devm_clk_get(dev, "hdmi");
if (IS_ERR(vc4_hdmi->hsm_rpm_clock)) {
DRM_ERROR("Failed to get HDMI state machine clock\n");
return PTR_ERR(vc4_hdmi->hsm_rpm_clock);
}
return 0;
}
@@ -3280,12 +3275,6 @@ static int vc5_hdmi_init_resources(struct drm_device *drm,
return PTR_ERR(vc4_hdmi->hsm_clock);
}
vc4_hdmi->hsm_rpm_clock = devm_clk_get(dev, "hdmi");
if (IS_ERR(vc4_hdmi->hsm_rpm_clock)) {
DRM_ERROR("Failed to get HDMI state machine clock\n");
return PTR_ERR(vc4_hdmi->hsm_rpm_clock);
}
vc4_hdmi->pixel_bvb_clock = devm_clk_get(dev, "bvb");
if (IS_ERR(vc4_hdmi->pixel_bvb_clock)) {
DRM_ERROR("Failed to get pixel bvb clock\n");
@@ -3349,7 +3338,7 @@ static int vc4_hdmi_runtime_suspend(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
clk_disable_unprepare(vc4_hdmi->hsm_rpm_clock);
clk_disable_unprepare(vc4_hdmi->hsm_clock);
return 0;
}
@@ -3362,16 +3351,7 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
unsigned long rate;
int ret;
/*
* The HSM clock is in the HDMI power domain, so we need to set
* its frequency while the power domain is active so that it
* keeps its rate.
*/
ret = clk_set_min_rate(vc4_hdmi->hsm_rpm_clock, HSM_MIN_CLOCK_FREQ);
if (ret)
return ret;
ret = clk_prepare_enable(vc4_hdmi->hsm_rpm_clock);
ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
if (ret)
return ret;
@@ -3384,7 +3364,7 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
* case, it will lead to a silent CPU stall. Let's make sure we
* prevent such a case.
*/
rate = clk_get_rate(vc4_hdmi->hsm_rpm_clock);
rate = clk_get_rate(vc4_hdmi->hsm_clock);
if (!rate) {
ret = -EINVAL;
goto err_disable_clk;

View File

@@ -164,7 +164,6 @@ struct vc4_hdmi {
struct clk *cec_clock;
struct clk *pixel_clock;
struct clk *hsm_clock;
struct clk *hsm_rpm_clock;
struct clk *audio_clock;
struct clk *pixel_bvb_clock;

View File

@@ -117,7 +117,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
hindex, exec->bo_count);
return NULL;
}
obj = exec->bo[hindex];
obj = to_drm_gem_dma_obj(exec->bo[hindex]);
bo = to_vc4_bo(&obj->base);
if (bo->validated_shader) {
@@ -810,7 +810,7 @@ validate_gl_shader_rec(struct drm_device *dev,
return -EINVAL;
}
bo[i] = exec->bo[src_handles[i]];
bo[i] = to_drm_gem_dma_obj(exec->bo[src_handles[i]]);
if (!bo[i])
return -EINVAL;
}

View File

@@ -249,4 +249,5 @@ void vgem_fence_close(struct vgem_file *vfile)
{
idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
idr_destroy(&vfile->fence_idr);
mutex_destroy(&vfile->fence_mutex);
}

View File

@@ -11,3 +11,14 @@ config DRM_VIRTIO_GPU
QEMU based VMMs (like KVM or Xen).
If unsure say M.
config DRM_VIRTIO_GPU_KMS
bool "Virtio GPU driver modesetting support"
depends on DRM_VIRTIO_GPU
default y
help
Enable modesetting support for the virtio GPU driver. This can be
disabled in cases where only "headless" usage of the GPU is
required.
If unsure, say Y.
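For example, a headless guest that only needs render offload could carry this fragment in its kernel configuration (illustrative .config excerpt):

CONFIG_DRM_VIRTIO_GPU=m
# CONFIG_DRM_VIRTIO_GPU_KMS is not set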

View File

@@ -336,6 +336,9 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
{
int i, ret;
if (!vgdev->num_scanouts)
return 0;
ret = drmm_mode_config_init(vgdev->ddev);
if (ret)
return ret;
@@ -362,6 +365,9 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
{
int i;
if (!vgdev->num_scanouts)
return;
for (i = 0 ; i < vgdev->num_scanouts; ++i)
kfree(vgdev->outputs[i].edid);
}

View File

@@ -172,6 +172,10 @@ MODULE_AUTHOR("Alon Levy");
DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
static const struct drm_driver driver = {
/*
* If KMS is disabled, DRIVER_MODESET and DRIVER_ATOMIC are masked
* out via drm_device::driver_features.
*/
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
.open = virtio_gpu_driver_open,
.postclose = virtio_gpu_driver_postclose,

View File

@@ -43,11 +43,13 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
events_read, &events_read);
if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
if (vgdev->has_edid)
virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
virtio_gpu_notify(vgdev);
drm_helper_hpd_irq_event(vgdev->ddev);
if (vgdev->num_scanouts) {
if (vgdev->has_edid)
virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
virtio_gpu_notify(vgdev);
drm_helper_hpd_irq_event(vgdev->ddev);
}
events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
}
virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
@@ -223,12 +225,15 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
num_scanouts, &num_scanouts);
vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
VIRTIO_GPU_MAX_SCANOUTS);
if (!vgdev->num_scanouts) {
DRM_ERROR("num_scanouts is zero\n");
ret = -EINVAL;
goto err_scanouts;
if (!IS_ENABLED(CONFIG_DRM_VIRTIO_GPU_KMS) || !vgdev->num_scanouts) {
DRM_INFO("KMS disabled\n");
vgdev->num_scanouts = 0;
vgdev->has_edid = false;
dev->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
} else {
DRM_INFO("number of scanouts: %d\n", num_scanouts);
}
DRM_INFO("number of scanouts: %d\n", num_scanouts);
virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
num_capsets, &num_capsets);
@@ -244,12 +249,14 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
if (num_capsets)
virtio_gpu_get_capsets(vgdev, num_capsets);
if (vgdev->has_edid)
virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
virtio_gpu_notify(vgdev);
wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
5 * HZ);
if (vgdev->num_scanouts) {
if (vgdev->has_edid)
virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
virtio_gpu_notify(vgdev);
wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
5 * HZ);
}
return 0;
err_scanouts:

View File

@@ -923,8 +923,7 @@ void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
cmd_p->hdr.ctx_id = cpu_to_le32(id);
cmd_p->nlen = cpu_to_le32(nlen);
cmd_p->context_init = cpu_to_le32(context_init);
strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
strscpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name));
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
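The strscpy() conversion above removes the manual termination step. A minimal sketch of the idiom (hypothetical helper, assuming a NUL-terminated source string):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

/* strscpy() always NUL-terminates dst and returns -E2BIG when the
 * source did not fit, so no explicit trailing-NUL write is needed. */
static void example_copy_debug_name(char *dst, size_t dst_size, const char *name)
{
	if (strscpy(dst, name, dst_size) == -E2BIG)
		pr_debug("debug name truncated\n");
}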

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o \
vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \

View File

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,40 +26,31 @@
*
**************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_drv.h"
#include "ttm_object.h"
/**
* vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
* vmw_buffer_object.
*
* @bo: Pointer to the TTM buffer object.
* Return: Pointer to the struct vmw_buffer_object embedding the
* TTM buffer object.
*/
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
static void vmw_bo_release(struct vmw_bo *vbo)
{
return container_of(bo, struct vmw_buffer_object, base);
vmw_bo_unmap(vbo);
drm_gem_object_release(&vbo->tbo.base);
}
/**
* bo_is_vmw - check if the buffer object is a &vmw_buffer_object
* @bo: ttm buffer object to be checked
* vmw_bo_free - vmw_bo destructor
*
* Uses destroy function associated with the object to determine if this is
* a &vmw_buffer_object.
*
* Returns:
* true if the object is of &vmw_buffer_object type, false if not.
* @bo: Pointer to the embedded struct ttm_buffer_object
*/
static bool bo_is_vmw(struct ttm_buffer_object *bo)
static void vmw_bo_free(struct ttm_buffer_object *bo)
{
return bo->destroy == &vmw_bo_bo_free ||
bo->destroy == &vmw_gem_destroy;
struct vmw_bo *vbo = to_vmw_bo(&bo->base);
WARN_ON(vbo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
vmw_bo_release(vbo);
kfree(vbo);
}
/**
@@ -72,13 +63,13 @@ static bool bo_is_vmw(struct ttm_buffer_object *bo)
* Return: Zero on success, Negative error code on failure. In particular
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf,
struct ttm_placement *placement,
bool interruptible)
static int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
struct vmw_bo *buf,
struct ttm_placement *placement,
bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
struct ttm_buffer_object *bo = &buf->tbo;
int ret;
vmw_execbuf_release_pinned_bo(dev_priv);
@@ -87,12 +78,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto err;
if (buf->base.pin_count > 0)
ret = ttm_resource_compat(bo->resource, placement)
? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, placement, &ctx);
ret = ttm_bo_validate(bo, placement, &ctx);
if (!ret)
vmw_bo_pin_reserved(buf, true);
@@ -115,11 +101,11 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf,
struct vmw_bo *buf,
bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
struct ttm_buffer_object *bo = &buf->tbo;
int ret;
vmw_execbuf_release_pinned_bo(dev_priv);
@@ -128,17 +114,17 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto err;
if (buf->base.pin_count > 0) {
ret = ttm_resource_compat(bo->resource, &vmw_vram_gmr_placement)
? 0 : -EINVAL;
goto out_unreserve;
}
ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
vmw_bo_placement_set(buf,
VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_GMR);
ret = ttm_bo_validate(bo, &buf->placement, &ctx);
if (likely(ret == 0) || ret == -ERESTARTSYS)
goto out_unreserve;
ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
vmw_bo_placement_set(buf,
VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_VRAM);
ret = ttm_bo_validate(bo, &buf->placement, &ctx);
out_unreserve:
if (!ret)
@@ -163,7 +149,7 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf,
struct vmw_bo *buf,
bool interruptible)
{
return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
@@ -184,22 +170,13 @@ int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf,
struct vmw_bo *buf,
bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement placement;
struct ttm_place place;
struct ttm_buffer_object *bo = &buf->tbo;
int ret = 0;
place = vmw_vram_placement.placement[0];
place.lpfn = PFN_UP(bo->resource->size);
placement.num_placement = 1;
placement.placement = &place;
placement.num_busy_placement = 1;
placement.busy_placement = &place;
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
@@ -213,16 +190,19 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
if (bo->resource->mem_type == TTM_PL_VRAM &&
bo->resource->start < PFN_UP(bo->resource->size) &&
bo->resource->start > 0 &&
buf->base.pin_count == 0) {
buf->tbo.pin_count == 0) {
ctx.interruptible = false;
(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
vmw_bo_placement_set(buf,
VMW_BO_DOMAIN_SYS,
VMW_BO_DOMAIN_SYS);
(void)ttm_bo_validate(bo, &buf->placement, &ctx);
}
if (buf->base.pin_count > 0)
ret = ttm_resource_compat(bo->resource, &placement)
? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, &placement, &ctx);
vmw_bo_placement_set(buf,
VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_VRAM);
buf->places[0].lpfn = PFN_UP(bo->resource->size);
ret = ttm_bo_validate(bo, &buf->placement, &ctx);
/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->resource->start != 0);
@@ -248,10 +228,10 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_unpin(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf,
struct vmw_bo *buf,
bool interruptible)
{
struct ttm_buffer_object *bo = &buf->base;
struct ttm_buffer_object *bo = &buf->tbo;
int ret;
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
@@ -293,12 +273,12 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
* @pin: Whether to pin or unpin.
*
*/
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
{
struct ttm_operation_ctx ctx = { false, true };
struct ttm_place pl;
struct ttm_placement placement;
struct ttm_buffer_object *bo = &vbo->base;
struct ttm_buffer_object *bo = &vbo->tbo;
uint32_t old_mem_type = bo->resource->mem_type;
int ret;
@@ -341,9 +321,9 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
* 3) Buffer object destruction
*
*/
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
{
struct ttm_buffer_object *bo = &vbo->base;
struct ttm_buffer_object *bo = &vbo->tbo;
bool not_used;
void *virtual;
int ret;
@@ -366,96 +346,70 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
* @vbo: The buffer object whose map we are tearing down.
*
* This function tears down a cached map set up using
* vmw_buffer_object_map_and_cache().
* vmw_bo_map_and_cache().
*/
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
void vmw_bo_unmap(struct vmw_bo *vbo)
{
if (vbo->map.bo == NULL)
return;
ttm_bo_kunmap(&vbo->map);
vbo->map.bo = NULL;
}
/**
* vmw_bo_bo_free - vmw buffer object destructor
*
* @bo: Pointer to the embedded struct ttm_buffer_object
*/
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
WARN_ON(vmw_bo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
vmw_bo_unmap(vmw_bo);
drm_gem_object_release(&bo->base);
kfree(vmw_bo);
}
/* default destructor */
static void vmw_bo_default_destroy(struct ttm_buffer_object *bo)
{
kfree(bo);
}
/**
* vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
* vmw_bo_init - Initialize a vmw buffer object
*
* @dev_priv: Pointer to the device private struct
* @size: size of the BO we need
* @placement: where to put it
* @p_bo: resulting BO
* @vmw_bo: Buffer object to initialize
* @params: Parameters used to initialize the buffer object
* @destroy: The function used to delete the buffer object
* Returns: Zero on success, negative error code on error.
*
* Creates and pin a simple BO for in kernel use.
*/
int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
struct ttm_placement *placement,
struct ttm_buffer_object **p_bo)
static int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_bo *vmw_bo,
struct vmw_bo_params *params,
void (*destroy)(struct ttm_buffer_object *))
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
.interruptible = params->bo_type != ttm_bo_type_kernel,
.no_wait_gpu = false
};
struct ttm_buffer_object *bo;
struct ttm_device *bdev = &dev_priv->bdev;
struct drm_device *vdev = &dev_priv->drm;
int ret;
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (unlikely(!bo))
return -ENOMEM;
memset(vmw_bo, 0, sizeof(*vmw_bo));
size = ALIGN(size, PAGE_SIZE);
BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
vmw_bo->tbo.priority = 3;
vmw_bo->res_tree = RB_ROOT;
drm_gem_private_object_init(vdev, &bo->base, size);
params->size = ALIGN(params->size, PAGE_SIZE);
drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, ttm_bo_type_kernel,
placement, 0, &ctx, NULL, NULL,
vmw_bo_default_destroy);
vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
&vmw_bo->placement, 0, &ctx, NULL,
NULL, destroy);
if (unlikely(ret))
goto error_free;
return ret;
ttm_bo_pin(bo);
ttm_bo_unreserve(bo);
*p_bo = bo;
if (params->pin)
ttm_bo_pin(&vmw_bo->tbo);
ttm_bo_unreserve(&vmw_bo->tbo);
return 0;
error_free:
kfree(bo);
return ret;
}
int vmw_bo_create(struct vmw_private *vmw,
size_t size, struct ttm_placement *placement,
bool interruptible, bool pin,
void (*bo_free)(struct ttm_buffer_object *bo),
struct vmw_buffer_object **p_bo)
struct vmw_bo_params *params,
struct vmw_bo **p_bo)
{
int ret;
BUG_ON(!bo_free);
*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
if (unlikely(!*p_bo)) {
DRM_ERROR("Failed to allocate a buffer.\n");
@@ -465,9 +419,7 @@ int vmw_bo_create(struct vmw_private *vmw,
/*
* vmw_bo_init will delete the *p_bo object if it fails
*/
ret = vmw_bo_init(vmw, *p_bo, size,
placement, interruptible, pin,
bo_free);
ret = vmw_bo_init(vmw, *p_bo, params, vmw_bo_free);
if (unlikely(ret != 0))
goto out_error;
@@ -478,57 +430,7 @@ int vmw_bo_create(struct vmw_private *vmw,
}
/**
* vmw_bo_init - Initialize a vmw buffer object
*
* @dev_priv: Pointer to the device private struct
* @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
* @size: Buffer object size in bytes.
* @placement: Initial placement.
* @interruptible: Whether waits should be performed interruptible.
* @pin: If the BO should be created pinned at a fixed location.
* @bo_free: The buffer object destructor.
* Returns: Zero on success, negative error code on error.
*
* Note that on error, the code will free the buffer object.
*/
int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_buffer_object *vmw_bo,
size_t size, struct ttm_placement *placement,
bool interruptible, bool pin,
void (*bo_free)(struct ttm_buffer_object *bo))
{
struct ttm_operation_ctx ctx = {
.interruptible = interruptible,
.no_wait_gpu = false
};
struct ttm_device *bdev = &dev_priv->bdev;
struct drm_device *vdev = &dev_priv->drm;
int ret;
WARN_ON_ONCE(!bo_free);
memset(vmw_bo, 0, sizeof(*vmw_bo));
BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
vmw_bo->base.priority = 3;
vmw_bo->res_tree = RB_ROOT;
size = ALIGN(size, PAGE_SIZE);
drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, ttm_bo_type_device,
placement, 0, &ctx, NULL, NULL, bo_free);
if (unlikely(ret)) {
return ret;
}
if (pin)
ttm_bo_pin(&vmw_bo->base);
ttm_bo_unreserve(&vmw_bo->base);
return 0;
}
/**
* vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu
* vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu
* access, idling previous GPU operations on the buffer and optionally
* blocking it for further command submissions.
*
@@ -541,11 +443,11 @@ int vmw_bo_init(struct vmw_private *dev_priv,
*
* A blocking grab will be automatically released when @tfile is closed.
*/
static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,
static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo,
uint32_t flags)
{
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
struct ttm_buffer_object *bo = &vmw_bo->base;
struct ttm_buffer_object *bo = &vmw_bo->tbo;
int ret;
if (flags & drm_vmw_synccpu_allow_cs) {
@@ -588,17 +490,17 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
uint32_t handle,
uint32_t flags)
{
struct vmw_buffer_object *vmw_bo;
struct vmw_bo *vmw_bo;
int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);
if (!ret) {
if (!(flags & drm_vmw_synccpu_allow_cs)) {
atomic_dec(&vmw_bo->cpu_writers);
}
ttm_bo_put(&vmw_bo->base);
ttm_bo_put(&vmw_bo->tbo);
}
drm_gem_object_put(&vmw_bo->base.base);
drm_gem_object_put(&vmw_bo->tbo.base);
return ret;
}
@@ -620,7 +522,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
{
struct drm_vmw_synccpu_arg *arg =
(struct drm_vmw_synccpu_arg *) data;
struct vmw_buffer_object *vbo;
struct vmw_bo *vbo;
int ret;
if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -639,7 +541,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo);
drm_gem_object_put(&vbo->base.base);
drm_gem_object_put(&vbo->tbo.base);
if (unlikely(ret != 0)) {
if (ret == -ERESTARTSYS || ret == -EBUSY)
return -EBUSY;
@@ -683,8 +585,7 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_unref_dmabuf_arg *arg =
(struct drm_vmw_unref_dmabuf_arg *)data;
drm_gem_handle_delete(file_priv, arg->handle);
return 0;
return drm_gem_handle_delete(file_priv, arg->handle);
}
@@ -694,14 +595,14 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
* @filp: The file the handle is registered with.
* @handle: The user buffer object handle
* @out: Pointer to where a pointer to the embedded
* struct vmw_buffer_object should be placed.
* struct vmw_bo should be placed.
* Return: Zero on success, Negative error code on error.
*
* The vmw buffer object pointer will be refcounted (both ttm and gem)
*/
int vmw_user_bo_lookup(struct drm_file *filp,
uint32_t handle,
struct vmw_buffer_object **out)
u32 handle,
struct vmw_bo **out)
{
struct drm_gem_object *gobj;
@@ -712,8 +613,8 @@ int vmw_user_bo_lookup(struct drm_file *filp,
return -ESRCH;
}
*out = gem_to_vmw_bo(gobj);
ttm_bo_get(&(*out)->base);
*out = to_vmw_bo(gobj);
ttm_bo_get(&(*out)->tbo);
return 0;
}
@@ -734,8 +635,7 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence)
{
struct ttm_device *bdev = bo->bdev;
struct vmw_private *dev_priv =
container_of(bdev, struct vmw_private, bdev);
struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
int ret;
if (fence == NULL)
@@ -771,7 +671,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_buffer_object *vbo;
struct vmw_bo *vbo;
int cpp = DIV_ROUND_UP(args->bpp, 8);
int ret;
@@ -795,7 +695,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
args->size, &args->handle,
&vbo);
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(&vbo->base.base);
drm_gem_object_put(&vbo->tbo.base);
return ret;
}
@@ -806,12 +706,8 @@ int vmw_dumb_create(struct drm_file *file_priv,
*/
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
/* Is @bo embedded in a struct vmw_buffer_object? */
if (!bo_is_vmw(bo))
return;
/* Kill any cached kernel maps before swapout */
vmw_bo_unmap(vmw_buffer_object(bo));
vmw_bo_unmap(to_vmw_bo(&bo->base));
}
@@ -828,13 +724,7 @@ void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem)
{
struct vmw_buffer_object *vbo;
/* Make sure @bo is embedded in a struct vmw_buffer_object? */
if (!bo_is_vmw(bo))
return;
vbo = container_of(bo, struct vmw_buffer_object, base);
struct vmw_bo *vbo = to_vmw_bo(&bo->base);
/*
* Kill any cached kernel maps before move to or from VRAM.
@@ -852,3 +742,98 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo);
}
static u32
set_placement_list(struct ttm_place *pl, u32 domain)
{
u32 n = 0;
/*
* The placements are ordered according to our preferences
*/
if (domain & VMW_BO_DOMAIN_MOB) {
pl[n].mem_type = VMW_PL_MOB;
pl[n].flags = 0;
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
if (domain & VMW_BO_DOMAIN_GMR) {
pl[n].mem_type = VMW_PL_GMR;
pl[n].flags = 0;
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
if (domain & VMW_BO_DOMAIN_VRAM) {
pl[n].mem_type = TTM_PL_VRAM;
pl[n].flags = 0;
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
pl[n].mem_type = VMW_PL_SYSTEM;
pl[n].flags = 0;
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
if (domain & VMW_BO_DOMAIN_SYS) {
pl[n].mem_type = TTM_PL_SYSTEM;
pl[n].flags = 0;
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
WARN_ON(!n);
if (!n) {
pl[n].mem_type = TTM_PL_SYSTEM;
pl[n].flags = 0;
pl[n].fpfn = 0;
pl[n].lpfn = 0;
n++;
}
return n;
}
void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
{
struct ttm_device *bdev = bo->tbo.bdev;
struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
struct ttm_placement *pl = &bo->placement;
bool mem_compatible = false;
u32 i;
pl->placement = bo->places;
pl->num_placement = set_placement_list(bo->places, domain);
if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
for (i = 0; i < pl->num_placement; ++i) {
if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM ||
bo->tbo.resource->mem_type == pl->placement[i].mem_type)
mem_compatible = true;
}
if (!mem_compatible)
drm_warn(&vmw->drm,
"%s: Incompatible transition from "
"bo->base.resource->mem_type = %u to domain = %u\n",
__func__, bo->tbo.resource->mem_type, domain);
}
pl->busy_placement = bo->busy_places;
pl->num_busy_placement = set_placement_list(bo->busy_places, busy_domain);
}
void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
{
struct ttm_device *bdev = bo->tbo.bdev;
struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM;
if (vmw->has_mob)
domain = VMW_BO_DOMAIN_MOB;
vmw_bo_placement_set(bo, domain, domain);
}
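For callers outside the pin helpers, the usual pattern is to set a placement and then let TTM migrate the buffer. A minimal sketch (hypothetical function, mirroring the hunks above):

/* Pick the device's preferred domain (MOB when available), then
 * (re)validate so TTM moves the buffer to a matching placement. */
static int example_validate_accelerated(struct vmw_bo *buf)
{
	struct ttm_operation_ctx ctx = { .interruptible = true, .no_wait_gpu = false };

	vmw_bo_placement_set_default_accelerated(buf);
	return ttm_bo_validate(&buf->tbo, &buf->placement, &ctx);
}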

View File

@@ -0,0 +1,203 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright 2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef VMWGFX_BO_H
#define VMWGFX_BO_H
#include "device_include/svga_reg.h"
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/rbtree_types.h>
#include <linux/types.h>
struct vmw_bo_dirty;
struct vmw_fence_obj;
struct vmw_private;
struct vmw_resource;
enum vmw_bo_domain {
VMW_BO_DOMAIN_SYS = BIT(0),
VMW_BO_DOMAIN_WAITABLE_SYS = BIT(1),
VMW_BO_DOMAIN_VRAM = BIT(2),
VMW_BO_DOMAIN_GMR = BIT(3),
VMW_BO_DOMAIN_MOB = BIT(4),
};
struct vmw_bo_params {
u32 domain;
u32 busy_domain;
enum ttm_bo_type bo_type;
size_t size;
bool pin;
};
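This params struct replaces the long argument lists of the old vmw_bo_create()/vmw_bo_create_kernel(). A minimal sketch of a caller (hypothetical function name; the real conversions appear in the hunks below):

/* All creation options travel in one designated-initializer struct. */
static int example_create_pinned_mob(struct vmw_private *vmw, struct vmw_bo **out)
{
	struct vmw_bo_params params = {
		.domain      = VMW_BO_DOMAIN_MOB,
		.busy_domain = VMW_BO_DOMAIN_MOB,
		.bo_type     = ttm_bo_type_kernel,
		.size        = PAGE_SIZE,
		.pin         = true,
	};

	return vmw_bo_create(vmw, &params, out);
}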
/**
* struct vmw_bo - TTM buffer object with vmwgfx additions
* @tbo: The TTM buffer object
* @placement: The preferred placement for this buffer object
* @places: The chosen places for the preferred placement.
* @busy_places: The chosen busy places for the preferred placement.
* @map: Kmap object for semi-persistent mappings
* @res_tree: RB tree of resources using this buffer object as a backing MOB
* @res_prios: Eviction priority counts for attached resources
* @cpu_writers: Number of synccpu write grabs. Protected by reservation when
* increased. May be decreased without reservation.
* @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
* @dirty: structure for user-space dirty-tracking
*/
struct vmw_bo {
struct ttm_buffer_object tbo;
struct ttm_placement placement;
struct ttm_place places[5];
struct ttm_place busy_places[5];
/* Protected by reservation */
struct ttm_bo_kmap_obj map;
struct rb_root res_tree;
u32 res_prios[TTM_MAX_BO_PRIORITY];
atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
struct vmw_bo_dirty *dirty;
};
void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo);
int vmw_bo_create(struct vmw_private *dev_priv,
struct vmw_bo_params *params,
struct vmw_bo **p_bo);
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
struct vmw_bo *buf,
bool interruptible);
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
struct vmw_bo *buf,
bool interruptible);
int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
struct vmw_bo *bo,
bool interruptible);
void vmw_bo_pin_reserved(struct vmw_bo *bo, bool pin);
int vmw_bo_unpin(struct vmw_private *vmw_priv,
struct vmw_bo *bo,
bool interruptible);
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr);
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
void vmw_bo_unmap(struct vmw_bo *vbo);
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
int vmw_user_bo_lookup(struct drm_file *filp,
u32 handle,
struct vmw_bo **out);
/**
* vmw_bo_prio_adjust - Adjust the buffer object eviction priority
* according to attached resources
* @vbo: The struct vmw_bo
*/
static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo)
{
int i = ARRAY_SIZE(vbo->res_prios);
while (i--) {
if (vbo->res_prios[i]) {
vbo->tbo.priority = i;
return;
}
}
vbo->tbo.priority = 3;
}
/**
* vmw_bo_prio_add - Notify a buffer object of a newly attached resource
* eviction priority
* @vbo: The struct vmw_bo
* @prio: The resource priority
*
* After being notified, the code assigns the highest resource eviction priority
* to the backing buffer object (mob).
*/
static inline void vmw_bo_prio_add(struct vmw_bo *vbo, int prio)
{
if (vbo->res_prios[prio]++ == 0)
vmw_bo_prio_adjust(vbo);
}
/**
* vmw_bo_prio_del - Notify a buffer object of a resource with a certain
* priority being removed
* @vbo: The struct vmw_bo
* @prio: The resource priority
*
* After being notified, the code assigns the highest resource eviction priority
* to the backing buffer object (mob).
*/
static inline void vmw_bo_prio_del(struct vmw_bo *vbo, int prio)
{
if (--vbo->res_prios[prio] == 0)
vmw_bo_prio_adjust(vbo);
}
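A short sketch of how these pair up in practice (hypothetical callers; the real ones live in the resource code): each attached resource adds its eviction priority, and the BO always carries the highest priority still in use.

static void example_res_attach(struct vmw_bo *vbo, int prio)
{
	vmw_bo_prio_add(vbo, prio);	/* may raise vbo->tbo.priority */
}

static void example_res_detach(struct vmw_bo *vbo, int prio)
{
	vmw_bo_prio_del(vbo, prio);	/* falls back to next-highest, or 3 */
}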
static inline void vmw_bo_unreference(struct vmw_bo **buf)
{
struct vmw_bo *tmp_buf = *buf;
*buf = NULL;
if (tmp_buf)
ttm_bo_put(&tmp_buf->tbo);
}
static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{
ttm_bo_get(&buf->tbo);
return buf;
}
static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
{
return container_of((gobj), struct vmw_bo, tbo.base);
}
#endif // VMWGFX_BO_H

View File

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2020 VMware, Inc., Palo Alto, CA., USA
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -24,13 +24,13 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <linux/sched/signal.h>
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_devcaps.h"
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_devcaps.h"
#include <linux/sched/signal.h>
bool vmw_supports_3d(struct vmw_private *dev_priv)
{
@@ -567,7 +567,7 @@ static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
* without writing to the query result structure.
*/
struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery body;
@@ -613,7 +613,7 @@ static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
* without writing to the query result structure.
*/
struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForGBQuery body;

View File

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2015 VMware, Inc., Palo Alto, CA., USA
* Copyright 2015-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,12 +25,13 @@
*
**************************************************************************/
#include <linux/dmapool.h>
#include <linux/pci.h>
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo.h>
#include "vmwgfx_drv.h"
#include <linux/dmapool.h>
#include <linux/pci.h>
/*
* Size of inline command buffers. Try to make sure that a page size is a
@@ -79,7 +80,6 @@ struct vmw_cmdbuf_context {
* frees are protected by @lock.
* @cmd_space: Buffer object for the command buffer space, unless we were
* able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
* @map_obj: Mapping state for @cmd_space. Immutable.
* @map: Pointer to command buffer space. May be a mapped buffer object or
* a contiguous coherent DMA memory allocation. Immutable.
* @cur: Command buffer for small kernel command submissions. Protected by
@@ -116,8 +116,7 @@ struct vmw_cmdbuf_man {
struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
struct list_head error;
struct drm_mm mm;
struct ttm_buffer_object *cmd_space;
struct ttm_bo_kmap_obj map_obj;
struct vmw_bo *cmd_space;
u8 *map;
struct vmw_cmdbuf_header *cur;
size_t cur_pos;
@@ -888,7 +887,7 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
header->cmd = man->map + offset;
if (man->using_mob) {
cb_hdr->flags = SVGA_CB_FLAG_MOB;
cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
cb_hdr->ptr.mob.mobid = man->cmd_space->tbo.resource->start;
cb_hdr->ptr.mob.mobOffset = offset;
} else {
cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
@@ -1221,7 +1220,6 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
struct vmw_private *dev_priv = man->dev_priv;
bool dummy;
int ret;
if (man->has_pool)
@@ -1234,6 +1232,13 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
if (man->map) {
man->using_mob = false;
} else {
struct vmw_bo_params bo_params = {
.domain = VMW_BO_DOMAIN_MOB,
.busy_domain = VMW_BO_DOMAIN_MOB,
.bo_type = ttm_bo_type_kernel,
.size = size,
.pin = true
};
/*
* DMA memory failed. If we can have command buffers in a
* MOB, try to use that instead. Note that this will
@@ -1244,19 +1249,12 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
!dev_priv->has_mob)
return -ENOMEM;
ret = vmw_bo_create_kernel(dev_priv, size,
&vmw_mob_placement,
&man->cmd_space);
ret = vmw_bo_create(dev_priv, &bo_params, &man->cmd_space);
if (ret)
return ret;
man->using_mob = true;
ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
&man->map_obj);
if (ret)
goto out_no_map;
man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
man->map = vmw_bo_map_and_cache(man->cmd_space);
man->using_mob = man->map;
}
man->size = size;
@@ -1276,14 +1274,6 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
(man->using_mob) ? "MOB" : "DMA");
return 0;
out_no_map:
if (man->using_mob) {
ttm_bo_put(man->cmd_space);
man->cmd_space = NULL;
}
return ret;
}
/**
@@ -1382,14 +1372,11 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
man->has_pool = false;
man->default_size = VMW_CMDBUF_INLINE_SIZE;
(void) vmw_cmdbuf_idle(man, false, 10*HZ);
if (man->using_mob) {
(void) ttm_bo_kunmap(&man->map_obj);
ttm_bo_put(man->cmd_space);
man->cmd_space = NULL;
} else {
if (man->using_mob)
vmw_bo_unreference(&man->cmd_space);
else
dma_free_coherent(man->dev_priv->drm.dev,
man->size, man->map, man->handle);
}
}
/**

View File

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -27,9 +27,10 @@
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
struct vmw_user_context {
struct ttm_base_object base;
@@ -38,7 +39,7 @@ struct vmw_user_context {
struct vmw_cmdbuf_res_manager *man;
struct vmw_resource *cotables[SVGA_COTABLE_MAX];
spinlock_t cotable_lock;
struct vmw_buffer_object *dx_query_mob;
struct vmw_bo *dx_query_mob;
};
static void vmw_user_context_free(struct vmw_resource *res);
@@ -72,10 +73,11 @@ const struct vmw_user_resource_conv *user_context_converter =
static const struct vmw_res_func vmw_legacy_context_func = {
.res_type = vmw_res_context,
.needs_backup = false,
.needs_guest_memory = false,
.may_evict = false,
.type_name = "legacy contexts",
.backup_placement = NULL,
.domain = VMW_BO_DOMAIN_SYS,
.busy_domain = VMW_BO_DOMAIN_SYS,
.create = NULL,
.destroy = NULL,
.bind = NULL,
@@ -84,12 +86,13 @@ static const struct vmw_res_func vmw_legacy_context_func = {
static const struct vmw_res_func vmw_gb_context_func = {
.res_type = vmw_res_context,
.needs_backup = true,
.needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "guest backed contexts",
.backup_placement = &vmw_mob_placement,
.domain = VMW_BO_DOMAIN_MOB,
.busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_gb_context_create,
.destroy = vmw_gb_context_destroy,
.bind = vmw_gb_context_bind,
@@ -98,12 +101,13 @@ static const struct vmw_res_func vmw_gb_context_func = {
static const struct vmw_res_func vmw_dx_context_func = {
.res_type = vmw_res_dx_context,
.needs_backup = true,
.needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "dx contexts",
.backup_placement = &vmw_mob_placement,
.domain = VMW_BO_DOMAIN_MOB,
.busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_dx_context_create,
.destroy = vmw_dx_context_destroy,
.bind = vmw_dx_context_bind,
@@ -182,7 +186,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
res->guest_memory_size = (dx ? sizeof(SVGADXContextMobFormat) :
sizeof(SVGAGBContextData));
ret = vmw_resource_init(dev_priv, res, true,
res_free,
@@ -354,8 +358,8 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
cmd->body.mobid = bo->resource->start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
cmd->body.validContents = res->guest_memory_dirty;
res->guest_memory_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -521,8 +525,8 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
cmd->body.mobid = bo->resource->start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
cmd->body.validContents = res->guest_memory_dirty;
res->guest_memory_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -853,7 +857,7 @@ vmw_context_binding_state(struct vmw_resource *ctx)
* specified in the parameter. 0 otherwise.
*/
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
struct vmw_buffer_object *mob)
struct vmw_bo *mob)
{
struct vmw_user_context *uctx =
container_of(ctx_res, struct vmw_user_context, res);
@@ -885,7 +889,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
*
* @ctx_res: The context resource
*/
struct vmw_buffer_object *
struct vmw_bo *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
struct vmw_user_context *uctx =

View File

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
* Copyright 2014-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -30,13 +30,14 @@
* whenever the backing MOB is evicted.
*/
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_mksstat.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include <drm/ttm/ttm_placement.h>
/**
* struct vmw_cotable - Context Object Table resource
*
@@ -130,12 +131,13 @@ static int vmw_cotable_destroy(struct vmw_resource *res);
static const struct vmw_res_func vmw_cotable_func = {
.res_type = vmw_res_cotable,
.needs_backup = true,
.needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "context guest backed object tables",
.backup_placement = &vmw_mob_placement,
.domain = VMW_BO_DOMAIN_MOB,
.busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_cotable_create,
.destroy = vmw_cotable_destroy,
.bind = vmw_cotable_bind,
@@ -180,7 +182,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
{
struct vmw_cotable *vcotbl = vmw_cotable(res);
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = &res->backup->base;
struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetCOTable body;
@@ -228,7 +230,7 @@ static int vmw_cotable_bind(struct vmw_resource *res,
* take the opportunity to correct the value here so that it's not
* misused in the future.
*/
val_buf->bo = &res->backup->base;
val_buf->bo = &res->guest_memory_bo->tbo;
return vmw_cotable_unscrub(res);
}
@@ -289,7 +291,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
cmd0->body.cid = vcotbl->ctx->id;
cmd0->body.type = vcotbl->type;
cmd1 = (void *) &cmd0[1];
vcotbl->size_read_back = res->backup_size;
vcotbl->size_read_back = res->guest_memory_size;
}
cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
cmd1->header.size = sizeof(cmd1->body);
@@ -371,12 +373,12 @@ static int vmw_cotable_readback(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = vcotbl->ctx->id;
cmd->body.type = vcotbl->type;
vcotbl->size_read_back = res->backup_size;
vcotbl->size_read_back = res->guest_memory_size;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
}
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
vmw_bo_fence_single(&res->backup->base, fence);
vmw_bo_fence_single(&res->guest_memory_bo->tbo, fence);
vmw_fence_obj_unreference(&fence);
return 0;
@@ -399,14 +401,21 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
struct ttm_operation_ctx ctx = { false, false };
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_cotable *vcotbl = vmw_cotable(res);
struct vmw_buffer_object *buf, *old_buf = res->backup;
struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
size_t old_size = res->backup_size;
struct vmw_bo *buf, *old_buf = res->guest_memory_bo;
struct ttm_buffer_object *bo, *old_bo = &res->guest_memory_bo->tbo;
size_t old_size = res->guest_memory_size;
size_t old_size_read_back = vcotbl->size_read_back;
size_t cur_size_read_back;
struct ttm_bo_kmap_obj old_map, new_map;
int ret;
size_t i;
struct vmw_bo_params bo_params = {
.domain = VMW_BO_DOMAIN_MOB,
.busy_domain = VMW_BO_DOMAIN_MOB,
.bo_type = ttm_bo_type_device,
.size = new_size,
.pin = true
};
MKS_STAT_TIME_DECL(MKSSTAT_KERN_COTABLE_RESIZE);
MKS_STAT_TIME_PUSH(MKSSTAT_KERN_COTABLE_RESIZE);
@@ -423,14 +432,13 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement,
true, true, vmw_bo_bo_free, &buf);
ret = vmw_bo_create(dev_priv, &bo_params, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
goto out_done;
}
bo = &buf->base;
bo = &buf->tbo;
WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));
ret = ttm_bo_wait(old_bo, false, false);
@@ -464,15 +472,18 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
}
/* Unpin new buffer, and switch backup buffers. */
ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
vmw_bo_placement_set(buf,
VMW_BO_DOMAIN_MOB,
VMW_BO_DOMAIN_MOB);
ret = ttm_bo_validate(bo, &buf->placement, &ctx);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed validating new COTable backup buffer.\n");
goto out_wait;
}
vmw_resource_mob_detach(res);
res->backup = buf;
res->backup_size = new_size;
res->guest_memory_bo = buf;
res->guest_memory_size = new_size;
vcotbl->size_read_back = cur_size_read_back;
/*
@@ -482,8 +493,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
ret = vmw_cotable_unscrub(res);
if (ret) {
DRM_ERROR("Failed switching COTable backup buffer.\n");
res->backup = old_buf;
res->backup_size = old_size;
res->guest_memory_bo = old_buf;
res->guest_memory_size = old_size;
vcotbl->size_read_back = old_size_read_back;
vmw_resource_mob_attach(res);
goto out_wait;
@@ -498,7 +509,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
if (unlikely(ret))
goto out_wait;
/* Release the pin acquired in vmw_bo_init */
/* Release the pin acquired in vmw_bo_create */
ttm_bo_unpin(bo);
MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);
@@ -533,7 +544,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
static int vmw_cotable_create(struct vmw_resource *res)
{
struct vmw_cotable *vcotbl = vmw_cotable(res);
size_t new_size = res->backup_size;
size_t new_size = res->guest_memory_size;
size_t needed_size;
int ret;
@@ -542,7 +553,7 @@ static int vmw_cotable_create(struct vmw_resource *res)
while (needed_size > new_size)
new_size *= 2;
if (likely(new_size <= res->backup_size)) {
if (likely(new_size <= res->guest_memory_size)) {
if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
ret = vmw_cotable_unscrub(res);
if (ret)
@@ -606,12 +617,12 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
INIT_LIST_HEAD(&vcotbl->resource_list);
vcotbl->res.id = type;
vcotbl->res.backup_size = PAGE_SIZE;
vcotbl->res.guest_memory_size = PAGE_SIZE;
num_entries = PAGE_SIZE / co_info[type].size;
if (num_entries < co_info[type].min_initial_entries) {
vcotbl->res.backup_size = co_info[type].min_initial_entries *
vcotbl->res.guest_memory_size = co_info[type].min_initial_entries *
co_info[type].size;
vcotbl->res.backup_size = PFN_ALIGN(vcotbl->res.backup_size);
vcotbl->res.guest_memory_size = PFN_ALIGN(vcotbl->res.guest_memory_size);
}
vcotbl->scrubbed = true;

View File

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -28,9 +28,10 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_devcaps.h"
#include "vmwgfx_mksstat.h"
#include "vmwgfx_binding.h"
#include "ttm_object.h"
#include <drm/drm_aperture.h>
@@ -386,27 +387,32 @@ static void vmw_print_sm_type(struct vmw_private *dev_priv)
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
int ret;
struct vmw_buffer_object *vbo;
struct vmw_bo *vbo;
struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result;
bool dummy;
struct vmw_bo_params bo_params = {
.domain = VMW_BO_DOMAIN_SYS,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_kernel,
.size = PAGE_SIZE,
.pin = true
};
/*
* Create the vbo as pinned, so that a tryreserve will
* immediately succeed. This is because we're the only
* user of the bo currently.
*/
ret = vmw_bo_create(dev_priv, PAGE_SIZE,
&vmw_sys_placement, false, true,
&vmw_bo_bo_free, &vbo);
ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
if (unlikely(ret != 0))
return ret;
ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
BUG_ON(ret != 0);
vmw_bo_pin_reserved(vbo, true);
ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
result->totalSize = sizeof(*result);
@@ -415,7 +421,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
ttm_bo_kunmap(&map);
}
vmw_bo_pin_reserved(vbo, false);
ttm_bo_unreserve(&vbo->base);
ttm_bo_unreserve(&vbo->tbo);
if (unlikely(ret != 0)) {
DRM_ERROR("Dummy query buffer map failed.\n");
@@ -1565,7 +1571,7 @@ static const struct file_operations vmwgfx_driver_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = vmw_unlocked_ioctl,
.mmap = vmw_mmap,
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
#if defined(CONFIG_COMPAT)

View File

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -117,32 +117,6 @@ struct vmwgfx_hash_item {
unsigned long key;
};
/**
* struct vmw_buffer_object - TTM buffer object with vmwgfx additions
* @base: The TTM buffer object
* @res_tree: RB tree of resources using this buffer object as a backing MOB
* @base_mapped_count: ttm BO mapping count; used by KMS atomic helpers.
* @cpu_writers: Number of synccpu write grabs. Protected by reservation when
* increased. May be decreased without reservation.
* @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
* @map: Kmap object for semi-persistent mappings
* @res_prios: Eviction priority counts for attached resources
* @dirty: structure for user-space dirty-tracking
*/
struct vmw_buffer_object {
struct ttm_buffer_object base;
struct rb_root res_tree;
/* For KMS atomic helpers: ttm bo mapping count */
atomic_t base_mapped_count;
atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
/* Protected by reservation */
struct ttm_bo_kmap_obj map;
u32 res_prios[TTM_MAX_BO_PRIORITY];
struct vmw_bo_dirty *dirty;
};
/**
* struct vmw_validate_buffer - Carries validation info about buffers.
@@ -168,21 +142,23 @@ struct vmw_res_func;
* @kref: For refcounting.
* @dev_priv: Pointer to the device private for this resource. Immutable.
* @id: Device id. Protected by @dev_priv::resource_lock.
* @backup_size: Backup buffer size. Immutable.
* @res_dirty: Resource contains data not yet in the backup buffer. Protected
* by resource reserved.
* @backup_dirty: Backup buffer contains data not yet in the HW resource.
* @guest_memory_size: Guest memory buffer size. Immutable.
* @res_dirty: Resource contains data not yet in the guest memory buffer.
* Protected by resource reserved.
* @guest_memory_dirty: Guest memory buffer contains data not yet in the HW
* resource. Protected by resource reserved.
* @coherent: Emulate coherency by tracking vm accesses.
* @backup: The backup buffer if any. Protected by resource reserved.
* @backup_offset: Offset into the backup buffer if any. Protected by resource
* reserved. Note that only a few resource types can have a @backup_offset
* different from zero.
* @guest_memory_bo: The guest memory buffer if any. Protected by resource
* reserved.
* @guest_memory_offset: Offset into the guest memory buffer if any. Protected
* by resource reserved. Note that only a few resource types can have a
* @guest_memory_offset different from zero.
* @pin_count: The pin count for this resource. A pinned resource has a
* pin-count greater than zero. It is not on the resource LRU lists and its
* backup buffer is pinned. Hence it can't be evicted.
* guest memory buffer is pinned. Hence it can't be evicted.
* @func: Method vtable for this resource. Immutable.
* @mob_node; Node for the MOB backup rbtree. Protected by @backup reserved.
* @mob_node: Node for the MOB guest memory rbtree. Protected by
* @guest_memory_bo reserved.
* @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
* @binding_head: List head for the context binding list. Protected by
* the @dev_priv::binding_mutex
@@ -190,18 +166,20 @@ struct vmw_res_func;
* @hw_destroy: Callback to destroy the resource on the device, as part of
* resource destruction.
*/
struct vmw_bo;
struct vmw_resource_dirty;
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
int id;
u32 used_prio;
unsigned long backup_size;
unsigned long guest_memory_size;
u32 res_dirty : 1;
u32 backup_dirty : 1;
u32 guest_memory_dirty : 1;
u32 coherent : 1;
struct vmw_buffer_object *backup;
unsigned long backup_offset;
struct vmw_bo *guest_memory_bo;
unsigned long guest_memory_offset;
unsigned long pin_count;
const struct vmw_res_func *func;
struct rb_node mob_node;
@@ -446,7 +424,7 @@ struct vmw_sw_context{
struct drm_file *filp;
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
struct vmw_buffer_object *cur_query_bo;
struct vmw_bo *cur_query_bo;
struct list_head bo_relocations;
struct list_head res_relocations;
uint32_t *buf_start;
@@ -458,7 +436,7 @@ struct vmw_sw_context{
struct list_head staged_cmd_res;
struct list_head ctx_list;
struct vmw_ctx_validation_info *dx_ctx_node;
struct vmw_buffer_object *dx_query_mob;
struct vmw_bo *dx_query_mob;
struct vmw_resource *dx_query_ctx;
struct vmw_cmdbuf_res_manager *man;
struct vmw_validation_context *ctx;
@@ -492,7 +470,7 @@ struct vmw_otable_batch {
unsigned num_otables;
struct vmw_otable *otables;
struct vmw_resource *context;
struct ttm_buffer_object *otable_bo;
struct vmw_bo *otable_bo;
};
enum {
@@ -632,8 +610,8 @@ struct vmw_private {
* are protected by the cmdbuf mutex.
*/
struct vmw_buffer_object *dummy_query_bo;
struct vmw_buffer_object *pinned_bo;
struct vmw_bo *dummy_query_bo;
struct vmw_bo *pinned_bo;
uint32_t query_cid;
uint32_t query_cid_valid;
bool dummy_query_bo_pinned;
@@ -677,11 +655,6 @@ struct vmw_private {
#endif
};
static inline struct vmw_buffer_object *gem_to_vmw_bo(struct drm_gem_object *gobj)
{
return container_of((gobj), struct vmw_buffer_object, base.base);
}
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
return container_of(res, struct vmw_surface, res);
@@ -692,6 +665,11 @@ static inline struct vmw_private *vmw_priv(struct drm_device *dev)
return (struct vmw_private *)dev->dev_private;
}
static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev)
{
return container_of(bdev, struct vmw_private, bdev);
}
static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
return (struct vmw_fpriv *)file_priv->driver_priv;
@@ -825,7 +803,7 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_buffer_object **out_buf);
struct vmw_bo **out_buf);
extern int vmw_user_resource_lookup_handle(
struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
@@ -844,20 +822,20 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
extern void vmw_resource_unreserve(struct vmw_resource *res,
bool dirty_set,
bool dirty,
bool switch_backup,
struct vmw_buffer_object *new_backup,
unsigned long new_backup_offset);
bool switch_guest_memory,
struct vmw_bo *new_guest_memory,
unsigned long new_guest_memory_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *old_mem,
struct ttm_resource *new_mem);
extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
int vmw_query_readback_all(struct vmw_bo *dx_query_mob);
void vmw_resource_evict_all(struct vmw_private *dev_priv);
void vmw_resource_unbind_list(struct vmw_bo *vbo);
void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
pgoff_t end);
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
pgoff_t end, pgoff_t *num_prefault);
/**
@@ -871,107 +849,6 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
return !RB_EMPTY_NODE(&res->mob_node);
}
/**
* Buffer object helper functions - vmwgfx_bo.c
*/
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
struct vmw_buffer_object *bo,
struct ttm_placement *placement,
bool interruptible);
extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf,
bool interruptible);
extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf,
bool interruptible);
extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
struct vmw_buffer_object *bo,
bool interruptible);
extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
struct vmw_buffer_object *bo,
bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
extern int vmw_bo_create_kernel(struct vmw_private *dev_priv,
unsigned long size,
struct ttm_placement *placement,
struct ttm_buffer_object **p_bo);
extern int vmw_bo_create(struct vmw_private *dev_priv,
size_t size, struct ttm_placement *placement,
bool interruptible, bool pin,
void (*bo_free)(struct ttm_buffer_object *bo),
struct vmw_buffer_object **p_bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_buffer_object *vmw_bo,
size_t size, struct ttm_placement *placement,
bool interruptible, bool pin,
void (*bo_free)(struct ttm_buffer_object *bo));
extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_user_bo_lookup(struct drm_file *filp,
uint32_t handle,
struct vmw_buffer_object **out);
extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
/**
* vmw_bo_adjust_prio - Adjust the buffer object eviction priority
* according to attached resources
* @vbo: The struct vmw_buffer_object
*/
static inline void vmw_bo_prio_adjust(struct vmw_buffer_object *vbo)
{
int i = ARRAY_SIZE(vbo->res_prios);
while (i--) {
if (vbo->res_prios[i]) {
vbo->base.priority = i;
return;
}
}
vbo->base.priority = 3;
}
/**
* vmw_bo_prio_add - Notify a buffer object of a newly attached resource
* eviction priority
* @vbo: The struct vmw_buffer_object
* @prio: The resource priority
*
* After being notified, the code assigns the highest resource eviction priority
* to the backing buffer object (mob).
*/
static inline void vmw_bo_prio_add(struct vmw_buffer_object *vbo, int prio)
{
if (vbo->res_prios[prio]++ == 0)
vmw_bo_prio_adjust(vbo);
}
/**
* vmw_bo_prio_del - Notify a buffer object of a resource with a certain
* priority being removed
* @vbo: The struct vmw_buffer_object
* @prio: The resource priority
*
* After being notified, the code assigns the highest resource eviction priority
* to the backing buffer object (mob).
*/
static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
{
if (--vbo->res_prios[prio] == 0)
vmw_bo_prio_adjust(vbo);
}
/**
* GEM related functionality - vmwgfx_gem.c
*/
@@ -979,10 +856,9 @@ extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
uint32_t *handle,
struct vmw_buffer_object **p_vbo);
struct vmw_bo **p_vbo);
extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
extern void vmw_gem_destroy(struct ttm_buffer_object *bo);
extern void vmw_debugfs_gem_init(struct vmw_private *vdev);
/**
@@ -1055,30 +931,21 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
return (vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_CURSOR_BYPASS_3) != 0;
}
/**
* TTM glue - vmwgfx_ttm_glue.c
*/
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
/**
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_device_funcs vmw_bo_driver;
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
unsigned long bo_size,
struct ttm_buffer_object **bo_p);
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
size_t bo_size,
u32 domain,
struct vmw_bo **bo_p);
extern void vmw_piter_start(struct vmw_piter *viter,
const struct vmw_sg_table *vsgt,
@@ -1297,8 +1164,8 @@ vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
struct vmw_buffer_object *mob);
extern struct vmw_buffer_object *
struct vmw_bo *mob);
extern struct vmw_bo *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
@@ -1523,12 +1390,12 @@ int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
/* Resource dirtying - vmwgfx_page_dirty.c */
void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo);
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo);
void vmw_bo_dirty_scan(struct vmw_bo *vbo);
int vmw_bo_dirty_add(struct vmw_bo *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
void vmw_bo_dirty_release(struct vmw_buffer_object *vbo);
void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
void vmw_bo_dirty_release(struct vmw_bo *vbo);
void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
pgoff_t start, pgoff_t end);
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
@@ -1561,22 +1428,6 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
return srf;
}
static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
{
struct vmw_buffer_object *tmp_buf = *buf;
*buf = NULL;
if (tmp_buf != NULL)
ttm_bo_put(&tmp_buf->base);
}
static inline struct vmw_buffer_object *
vmw_bo_reference(struct vmw_buffer_object *buf)
{
ttm_bo_get(&buf->base);
return buf;
}
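
The reference helpers leave this header as well, while their callers below
(e.g. the cursor-mob teardown in vmwgfx_kms.c) keep using them, so they
presumably move into the new vmwgfx_bo.h alongside struct vmw_bo. A sketch
of the relocated pair, assuming only the mechanical base -> tbo member
rename visible throughout this diff:

	static inline void vmw_bo_unreference(struct vmw_bo **buf)
	{
		struct vmw_bo *tmp_buf = *buf;

		*buf = NULL;
		if (tmp_buf != NULL)
			ttm_bo_put(&tmp_buf->tbo);
	}

	static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
	{
		ttm_bo_get(&buf->tbo);
		return buf;
	}
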
static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
atomic_inc(&dev_priv->num_fifo_resources);


@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009 - 2022 VMware, Inc., Palo Alto, CA., USA
* Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -24,17 +24,17 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <linux/sync_file.h>
#include <linux/hashtable.h>
#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "vmwgfx_mksstat.h"
#include "vmwgfx_so.h"
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_mksstat.h"
#include <linux/sync_file.h>
#include <linux/hashtable.h>
/*
* Helper macro to get dx_ctx_node if available otherwise print an error
@@ -65,7 +65,7 @@
*/
struct vmw_relocation {
struct list_head head;
struct vmw_buffer_object *vbo;
struct vmw_bo *vbo;
union {
SVGAMobId *mob_loc;
SVGAGuestPtr *location;
@@ -149,7 +149,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
struct vmw_buffer_object **vmw_bo_p);
struct vmw_bo **vmw_bo_p);
/**
* vmw_ptr_diff - Compute the offset from a to b in bytes
*
@@ -475,12 +475,16 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
if (has_sm4_context(dev_priv) &&
vmw_res_type(ctx) == vmw_res_dx_context) {
struct vmw_buffer_object *dx_query_mob;
struct vmw_bo *dx_query_mob;
dx_query_mob = vmw_context_get_dx_query_mob(ctx);
if (dx_query_mob)
if (dx_query_mob) {
vmw_bo_placement_set(dx_query_mob,
VMW_BO_DOMAIN_MOB,
VMW_BO_DOMAIN_MOB);
ret = vmw_validation_add_bo(sw_context->ctx,
dx_query_mob, true, false);
dx_query_mob);
}
}
mutex_unlock(&dev_priv->binding_mutex);
@@ -596,7 +600,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
return ret;
if (sw_context->dx_query_mob) {
struct vmw_buffer_object *expected_dx_query_mob;
struct vmw_bo *expected_dx_query_mob;
expected_dx_query_mob =
vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
@@ -703,7 +707,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
struct vmw_private *dev_priv = ctx_res->dev_priv;
struct vmw_buffer_object *dx_query_mob;
struct vmw_bo *dx_query_mob;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
@@ -718,7 +722,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = ctx_res->id;
cmd->body.mobid = dx_query_mob->base.resource->start;
cmd->body.mobid = dx_query_mob->tbo.resource->start;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_context_bind_dx_query(ctx_res, dx_query_mob);
@@ -1017,7 +1021,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* after successful submission of the current command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
struct vmw_buffer_object *new_query_bo,
struct vmw_bo *new_query_bo,
struct vmw_sw_context *sw_context)
{
struct vmw_res_cache_entry *ctx_entry =
@@ -1029,24 +1033,24 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) {
if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
VMW_DEBUG_USER("Query buffer too large.\n");
return -EINVAL;
}
if (unlikely(sw_context->cur_query_bo != NULL)) {
sw_context->needs_post_query_barrier = true;
vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
ret = vmw_validation_add_bo(sw_context->ctx,
sw_context->cur_query_bo,
dev_priv->has_mob, false);
sw_context->cur_query_bo);
if (unlikely(ret != 0))
return ret;
}
sw_context->cur_query_bo = new_query_bo;
vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
ret = vmw_validation_add_bo(sw_context->ctx,
dev_priv->dummy_query_bo,
dev_priv->has_mob, false);
dev_priv->dummy_query_bo);
if (unlikely(ret != 0))
return ret;
}
@@ -1145,9 +1149,9 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
struct vmw_buffer_object **vmw_bo_p)
struct vmw_bo **vmw_bo_p)
{
struct vmw_buffer_object *vmw_bo;
struct vmw_bo *vmw_bo;
uint32_t handle = *id;
struct vmw_relocation *reloc;
int ret;
@@ -1158,9 +1162,10 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
return PTR_ERR(vmw_bo);
}
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
ttm_bo_put(&vmw_bo->base);
drm_gem_object_put(&vmw_bo->base.base);
vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
ttm_bo_put(&vmw_bo->tbo);
drm_gem_object_put(&vmw_bo->tbo.base);
if (unlikely(ret != 0))
return ret;
@@ -1200,9 +1205,9 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
struct vmw_buffer_object **vmw_bo_p)
struct vmw_bo **vmw_bo_p)
{
struct vmw_buffer_object *vmw_bo;
struct vmw_bo *vmw_bo;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
@@ -1213,9 +1218,11 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
return PTR_ERR(vmw_bo);
}
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
ttm_bo_put(&vmw_bo->base);
drm_gem_object_put(&vmw_bo->base.base);
vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
ttm_bo_put(&vmw_bo->tbo);
drm_gem_object_put(&vmw_bo->tbo.base);
if (unlikely(ret != 0))
return ret;
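
Both translate helpers now follow the same shape: vmw_validation_add_bo()
has lost its two bool arguments, and callers pin down the allowed domains
beforehand with vmw_bo_placement_set(). Condensed, with the handle lookup
(which the hunks above elide) standing in as a placeholder:

	struct vmw_bo *vmw_bo = lookup_bo(sw_context, handle); /* placeholder */

	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
	/* The lookup references are dropped right away; the validation
	 * context presumably tracks the buffer on its own from here. */
	ttm_bo_put(&vmw_bo->tbo);
	drm_gem_object_put(&vmw_bo->tbo.base);
	if (unlikely(ret != 0))
		return ret;
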
@@ -1280,7 +1287,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
struct vmw_buffer_object *vmw_bo;
struct vmw_bo *vmw_bo;
int ret;
cmd = container_of(header, typeof(*cmd), header);
@@ -1363,7 +1370,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo;
struct vmw_bo *vmw_bo;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
int ret;
@@ -1393,7 +1400,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo;
struct vmw_bo *vmw_bo;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
int ret;
@@ -1439,7 +1446,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo;
struct vmw_bo *vmw_bo;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
int ret;
@@ -1467,7 +1474,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo;
struct vmw_bo *vmw_bo;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
int ret;
@@ -1504,7 +1511,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo = NULL;
struct vmw_bo *vmw_bo = NULL;
struct vmw_surface *srf = NULL;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
int ret;
@@ -1528,7 +1535,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
return ret;
/* Make sure DMA doesn't cross BO boundaries. */
bo_size = vmw_bo->base.base.size;
bo_size = vmw_bo->tbo.base.size;
if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
VMW_DEBUG_USER("Invalid DMA offset.\n");
return -EINVAL;
@@ -1551,7 +1558,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);
vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);
return 0;
}
@@ -1670,7 +1677,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf)
{
struct vmw_buffer_object *vmw_bo;
struct vmw_bo *vmw_bo;
struct {
uint32_t header;
@@ -1701,7 +1708,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
struct vmw_resource *res, uint32_t *buf_id,
unsigned long backup_offset)
{
struct vmw_buffer_object *vbo;
struct vmw_bo *vbo;
void *info;
int ret;
@@ -3754,7 +3761,7 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
struct ttm_buffer_object *bo;
list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
bo = &reloc->vbo->base;
bo = &reloc->vbo->tbo;
switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
reloc->location->offset += bo->resource->start << PAGE_SHIFT;
@@ -4364,13 +4371,17 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
if (dev_priv->pinned_bo == NULL)
goto out_unlock;
ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
false);
vmw_bo_placement_set(dev_priv->pinned_bo,
VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
if (ret)
goto out_no_reserve;
ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
false);
vmw_bo_placement_set(dev_priv->dummy_query_bo,
VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
if (ret)
goto out_no_reserve;


@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
* Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Copyright 2021 VMware, Inc.
* Copyright 2021-2023 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -24,31 +24,17 @@
*
*/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "drm/drm_prime.h"
#include "drm/drm_gem_ttm_helper.h"
/**
* vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
* vmw_buffer_object.
*
* @bo: Pointer to the TTM buffer object.
* Return: Pointer to the struct vmw_buffer_object embedding the
* TTM buffer object.
*/
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
return container_of(bo, struct vmw_buffer_object, base);
}
static void vmw_gem_object_free(struct drm_gem_object *gobj)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj);
if (bo) {
if (bo)
ttm_bo_put(bo);
}
}
static int vmw_gem_object_open(struct drm_gem_object *obj,
@@ -65,7 +51,7 @@ static void vmw_gem_object_close(struct drm_gem_object *obj,
static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
struct vmw_buffer_object *vbo = vmw_buffer_object(bo);
struct vmw_bo *vbo = to_vmw_bo(obj);
int ret;
ret = ttm_bo_reserve(bo, false, false, NULL);
@@ -103,6 +89,13 @@ static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages);
}
static const struct vm_operations_struct vmw_vm_ops = {
.pfn_mkwrite = vmw_bo_vm_mkwrite,
.page_mkwrite = vmw_bo_vm_mkwrite,
.fault = vmw_bo_vm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
};
static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.free = vmw_gem_object_free,
@@ -115,43 +108,31 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.vmap = drm_gem_ttm_vmap,
.vunmap = drm_gem_ttm_vunmap,
.mmap = drm_gem_ttm_mmap,
.vm_ops = &vmw_vm_ops,
};
/**
* vmw_gem_destroy - vmw buffer object destructor
*
* @bo: Pointer to the embedded struct ttm_buffer_object
*/
void vmw_gem_destroy(struct ttm_buffer_object *bo)
{
struct vmw_buffer_object *vbo = vmw_buffer_object(bo);
WARN_ON(vbo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
vmw_bo_unmap(vbo);
drm_gem_object_release(&vbo->base.base);
kfree(vbo);
}
int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
uint32_t *handle,
struct vmw_buffer_object **p_vbo)
struct vmw_bo **p_vbo)
{
int ret;
struct vmw_bo_params params = {
.domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
.pin = false
};
ret = vmw_bo_create(dev_priv, size,
(dev_priv->has_mob) ?
&vmw_sys_placement :
&vmw_vram_sys_placement,
true, false, &vmw_gem_destroy, p_vbo);
ret = vmw_bo_create(dev_priv, &params, p_vbo);
if (ret != 0)
goto out_no_bo;
(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
(*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
ret = drm_gem_handle_create(filp, &(*p_vbo)->tbo.base, handle);
out_no_bo:
return ret;
}
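
vmw_bo_create() trades its long positional argument list (placement,
interruptible, pin, destructor) for a parameter struct. A sketch of another
call site using the same API; the field values here are illustrative, not
taken from this series:

	struct vmw_bo *vbo;
	struct vmw_bo_params params = {
		.domain      = VMW_BO_DOMAIN_VRAM,
		.busy_domain = VMW_BO_DOMAIN_VRAM,
		.bo_type     = ttm_bo_type_kernel,
		.size        = PAGE_SIZE,
		.pin         = true,
	};
	int ret = vmw_bo_create(dev_priv, &params, &vbo);

	if (ret != 0)
		return ret;
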
@@ -165,7 +146,7 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
(union drm_vmw_alloc_dmabuf_arg *)data;
struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
struct drm_vmw_dmabuf_rep *rep = &arg->rep;
struct vmw_buffer_object *vbo;
struct vmw_bo *vbo;
uint32_t handle;
int ret;
@@ -175,23 +156,23 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
goto out_no_bo;
rep->handle = handle;
rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
rep->map_handle = drm_vma_node_offset_addr(&vbo->tbo.base.vma_node);
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(&vbo->base.base);
drm_gem_object_put(&vbo->tbo.base);
out_no_bo:
return ret;
}
#if defined(CONFIG_DEBUG_FS)
static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_file *m)
static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m)
{
const char *placement;
const char *type;
switch (bo->base.resource->mem_type) {
switch (bo->tbo.resource->mem_type) {
case TTM_PL_SYSTEM:
placement = " CPU";
break;
@@ -212,7 +193,7 @@ static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_f
break;
}
switch (bo->base.type) {
switch (bo->tbo.type) {
case ttm_bo_type_device:
type = "device";
break;
@@ -228,12 +209,12 @@ static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_f
}
seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s",
id, bo->base.base.size, placement, type);
id, bo->tbo.base.size, placement, type);
seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
bo->base.priority,
bo->base.pin_count,
kref_read(&bo->base.base.refcount),
kref_read(&bo->base.kref));
bo->tbo.priority,
bo->tbo.pin_count,
kref_read(&bo->tbo.base.refcount),
kref_read(&bo->tbo.kref));
seq_puts(m, "\n");
}
@@ -267,7 +248,7 @@ static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused)
spin_lock(&file->table_lock);
idr_for_each_entry(&file->object_idr, gobj, id) {
struct vmw_buffer_object *bo = gem_to_vmw_bo(gobj);
struct vmw_bo *bo = to_vmw_bo(gobj);
vmw_bo_print_info(id, bo, m);
}


@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -24,8 +24,9 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_kms.h"
#include "vmwgfx_bo.h"
#include "vmw_surface_cache.h"
#include <drm/drm_atomic.h>
@@ -152,9 +153,8 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
SVGAGBCursorHeader *header;
SVGAGBAlphaCursorHeader *alpha_header;
const u32 image_size = width * height * sizeof(*image);
bool dummy;
header = ttm_kmap_obj_virtual(&vps->cursor.map, &dummy);
header = vmw_bo_map_and_cache(vps->cursor.bo);
alpha_header = &header->header.alphaHeader;
memset(header, 0, sizeof(*header));
@@ -169,7 +169,7 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
memcpy(header + 1, image, image_size);
vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
vps->cursor.bo->resource->start);
vps->cursor.bo->tbo.resource->start);
}
@@ -184,13 +184,13 @@ static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
*/
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
bool dummy;
bool is_iomem;
if (vps->surf) {
if (vps->surf_mapped)
return vmw_bo_map_and_cache(vps->surf->res.backup);
return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
return vps->surf->snooper.image;
} else if (vps->bo)
return ttm_kmap_obj_virtual(&vps->bo->map, &dummy);
return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);
return NULL;
}
@@ -222,15 +222,13 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
return changed;
}
static void vmw_du_destroy_cursor_mob(struct ttm_buffer_object **bo)
static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
{
if (!(*bo))
if (!(*vbo))
return;
ttm_bo_unpin(*bo);
ttm_bo_put(*bo);
kfree(*bo);
*bo = NULL;
ttm_bo_unpin(&(*vbo)->tbo);
vmw_bo_unreference(vbo);
}
static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
@@ -254,8 +252,8 @@ static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
/* Cache is full: See if this mob is bigger than an existing mob. */
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
if (vcp->cursor_mobs[i]->base.size <
vps->cursor.bo->base.size) {
if (vcp->cursor_mobs[i]->tbo.base.size <
vps->cursor.bo->tbo.base.size) {
vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
vcp->cursor_mobs[i] = vps->cursor.bo;
vps->cursor.bo = NULL;
@@ -288,7 +286,7 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
return -EINVAL;
if (vps->cursor.bo) {
if (vps->cursor.bo->base.size >= size)
if (vps->cursor.bo->tbo.base.size >= size)
return 0;
vmw_du_put_cursor_mob(vcp, vps);
}
@@ -296,26 +294,27 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
/* Look for an unused mob in the cache. */
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
if (vcp->cursor_mobs[i] &&
vcp->cursor_mobs[i]->base.size >= size) {
vcp->cursor_mobs[i]->tbo.base.size >= size) {
vps->cursor.bo = vcp->cursor_mobs[i];
vcp->cursor_mobs[i] = NULL;
return 0;
}
}
/* Create a new mob if we can't find an existing one. */
ret = vmw_bo_create_kernel(dev_priv, size, &vmw_mob_placement,
&vps->cursor.bo);
ret = vmw_bo_create_and_populate(dev_priv, size,
VMW_BO_DOMAIN_MOB,
&vps->cursor.bo);
if (ret != 0)
return ret;
	/* Fence the mob creation so we are guaranteed to have the mob */
ret = ttm_bo_reserve(vps->cursor.bo, false, false, NULL);
ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
if (ret != 0)
goto teardown;
vmw_bo_fence_single(vps->cursor.bo, NULL);
ttm_bo_unreserve(vps->cursor.bo);
vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL);
ttm_bo_unreserve(&vps->cursor.bo->tbo);
return 0;
teardown:
@@ -363,7 +362,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
SVGA3dCopyBox *box;
unsigned box_count;
void *virtual;
bool dummy;
bool is_iomem;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
@@ -423,7 +422,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
if (unlikely(ret != 0))
goto err_unreserve;
virtual = ttm_kmap_obj_virtual(&map, &dummy);
virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
memcpy(srf->snooper.image, virtual,
@@ -573,39 +572,30 @@ vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
{
int ret;
u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
struct ttm_buffer_object *bo = vps->cursor.bo;
struct ttm_buffer_object *bo;
if (!bo)
if (!vps->cursor.bo)
return -EINVAL;
bo = &vps->cursor.bo->tbo;
if (bo->base.size < size)
return -EINVAL;
if (vps->cursor.mapped)
if (vps->cursor.bo->map.virtual)
return 0;
ret = ttm_bo_reserve(bo, false, false, NULL);
if (unlikely(ret != 0))
return -ENOMEM;
ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vps->cursor.map);
	/*
	 * We just want to try to get the mob bind to finish
	 * so that the first write to SVGA_REG_CURSOR_MOBID
	 * is done with a buffer that the device has already
	 * seen.
	 */
(void) ttm_bo_wait(bo, false, false);
vmw_bo_map_and_cache(vps->cursor.bo);
ttm_bo_unreserve(bo);
if (unlikely(ret != 0))
return -ENOMEM;
vps->cursor.mapped = true;
return 0;
}
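
vmw_du_cursor_plane_map_cm() now defers to vmw_bo_map_and_cache() and
checks vps->cursor.bo->map.virtual to detect an existing mapping, instead
of keeping its own cursor.map/cursor.mapped bookkeeping. The helper itself
is outside this diff; assuming the usual kmap-once-and-cache idiom, it
would look roughly like this:

	void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
	{
		struct ttm_buffer_object *bo = &vbo->tbo;
		bool not_used;
		void *virtual;
		int ret;

		/* Reuse the cached mapping when one already exists. */
		virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
		if (virtual)
			return virtual;

		ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
		if (ret)
			return NULL;

		return ttm_kmap_obj_virtual(&vbo->map, &not_used);
	}
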
@@ -622,19 +612,15 @@ static int
vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
{
int ret = 0;
struct ttm_buffer_object *bo = vps->cursor.bo;
struct vmw_bo *vbo = vps->cursor.bo;
if (!vps->cursor.mapped)
if (!vbo || !vbo->map.virtual)
return 0;
if (!bo)
return 0;
ret = ttm_bo_reserve(bo, true, false, NULL);
ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
if (likely(ret == 0)) {
ttm_bo_kunmap(&vps->cursor.map);
ttm_bo_unreserve(bo);
vps->cursor.mapped = false;
vmw_bo_unmap(vbo);
ttm_bo_unreserve(&vbo->tbo);
}
return ret;
@@ -657,20 +643,19 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
bool dummy;
bool is_iomem;
if (vps->surf_mapped) {
vmw_bo_unmap(vps->surf->res.backup);
vmw_bo_unmap(vps->surf->res.guest_memory_bo);
vps->surf_mapped = false;
}
if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &dummy)) {
const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
if (likely(ret == 0)) {
if (atomic_read(&vps->bo->base_mapped_count) == 0)
ttm_bo_kunmap(&vps->bo->map);
ttm_bo_unreserve(&vps->bo->base);
ttm_bo_kunmap(&vps->bo->map);
ttm_bo_unreserve(&vps->bo->tbo);
}
}
@@ -736,29 +721,26 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
* reserve the ttm_buffer_object first which
* vmw_bo_map_and_cache() omits.
*/
ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
if (unlikely(ret != 0))
return -ENOMEM;
ret = ttm_bo_kmap(&vps->bo->base, 0, PFN_UP(size), &vps->bo->map);
ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);
if (likely(ret == 0))
atomic_inc(&vps->bo->base_mapped_count);
ttm_bo_unreserve(&vps->bo->base);
ttm_bo_unreserve(&vps->bo->tbo);
if (unlikely(ret != 0))
return -ENOMEM;
} else if (vps->surf && !vps->bo && vps->surf->res.backup) {
} else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
WARN_ON(vps->surf->snooper.image);
ret = ttm_bo_reserve(&vps->surf->res.backup->base, true, false,
ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
NULL);
if (unlikely(ret != 0))
return -ENOMEM;
vmw_bo_map_and_cache(vps->surf->res.backup);
ttm_bo_unreserve(&vps->surf->res.backup->base);
vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
vps->surf_mapped = true;
}
@@ -785,7 +767,6 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
s32 hotspot_x, hotspot_y;
bool dummy;
hotspot_x = du->hotspot_x;
hotspot_y = du->hotspot_y;
@@ -827,11 +808,6 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x, hotspot_y);
}
if (vps->bo) {
if (ttm_kmap_obj_virtual(&vps->bo->map, &dummy))
atomic_dec(&vps->bo->base_mapped_count);
}
du->cursor_x = new_state->crtc_x + du->set_gui_x;
du->cursor_y = new_state->crtc_y + du->set_gui_y;
@@ -935,7 +911,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
WARN_ON(!surface);
if (!surface ||
(!surface->snooper.image && !surface->res.backup)) {
(!surface->snooper.image && !surface->res.guest_memory_bo)) {
DRM_ERROR("surface not suitable for cursor\n");
return -EINVAL;
}
@@ -1279,9 +1255,9 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
user_fence_rep, vclips, num_clips,
NULL);
case vmw_du_screen_target:
return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
user_fence_rep, NULL, vclips, num_clips,
1, false, true, NULL);
return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
user_fence_rep, NULL, vclips, num_clips,
1, NULL);
default:
WARN_ONCE(true,
"Readback called with invalid display system.\n");
@@ -1406,7 +1382,7 @@ static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(fb);
return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle);
return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}
static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
@@ -1486,69 +1462,6 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
.dirty = vmw_framebuffer_bo_dirty_ext,
};
/*
 * Pin the buffer in a location suitable for access by the
* display system.
*/
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_buffer_object *buf;
struct ttm_placement *placement;
int ret;
buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
if (!buf)
return 0;
switch (dev_priv->active_display_unit) {
case vmw_du_legacy:
vmw_overlay_pause_all(dev_priv);
ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
vmw_overlay_resume_all(dev_priv);
break;
case vmw_du_screen_object:
case vmw_du_screen_target:
if (vfb->bo) {
if (dev_priv->capabilities & SVGA_CAP_3D) {
/*
* Use surface DMA to get content to
 * screen target surface.
*/
placement = &vmw_vram_gmr_placement;
} else {
/* Use CPU blit. */
placement = &vmw_sys_placement;
}
} else {
/* Use surface / image update */
placement = &vmw_mob_placement;
}
return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
default:
return -EINVAL;
}
return ret;
}
static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_buffer_object *buf;
buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
if (WARN_ON(!buf))
return 0;
return vmw_bo_unpin(dev_priv, buf, false);
}
/**
* vmw_create_bo_proxy - create a proxy surface for the buffer object
*
@@ -1566,7 +1479,7 @@ static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
*/
static int vmw_create_bo_proxy(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct vmw_buffer_object *bo_mob,
struct vmw_bo *bo_mob,
struct vmw_surface **srf_out)
{
struct vmw_surface_metadata metadata = {0};
@@ -1618,9 +1531,9 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
/* Reserve and switch the backing mob. */
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void) vmw_resource_reserve(res, false, true);
vmw_bo_unreference(&res->backup);
res->backup = vmw_bo_reference(bo_mob);
res->backup_offset = 0;
vmw_bo_unreference(&res->guest_memory_bo);
res->guest_memory_bo = vmw_bo_reference(bo_mob);
res->guest_memory_offset = 0;
vmw_resource_unreserve(res, false, false, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
@@ -1630,7 +1543,7 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
struct vmw_buffer_object *bo,
struct vmw_bo *bo,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd2
*mode_cmd)
@@ -1642,7 +1555,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
int ret;
requested_size = mode_cmd->height * mode_cmd->pitches[0];
if (unlikely(requested_size > bo->base.base.size)) {
if (unlikely(requested_size > bo->tbo.base.size)) {
DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n");
return -EINVAL;
@@ -1663,7 +1576,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
goto out_err1;
}
vfbd->base.base.obj[0] = &bo->base.base;
vfbd->base.base.obj[0] = &bo->tbo.base;
drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
vfbd->base.bo = true;
vfbd->buffer = vmw_bo_reference(bo);
@@ -1718,7 +1631,7 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
*/
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_buffer_object *bo,
struct vmw_bo *bo,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd2 *mode_cmd)
@@ -1765,9 +1678,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
if (ret)
return ERR_PTR(ret);
vfb->pin = vmw_framebuffer_pin;
vfb->unpin = vmw_framebuffer_unpin;
return vfb;
}
@@ -1782,7 +1692,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
struct vmw_buffer_object *bo = NULL;
struct vmw_bo *bo = NULL;
int ret;
/* returns either a bo or surface */
@@ -1817,7 +1727,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
/* vmw_user_lookup_handle takes one ref so does new_fb */
if (bo) {
vmw_bo_unreference(&bo);
drm_gem_object_put(&bo->base.base);
drm_gem_object_put(&bo->tbo.base);
}
if (surface)
vmw_surface_unreference(&surface);
@@ -3076,8 +2986,20 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
struct vmw_framebuffer_bo *vfbbo =
container_of(update->vfb, typeof(*vfbbo), base);
ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
update->cpu_blit);
/*
 * For screen targets we want a mappable bo; for everything else we want
 * an accelerated, i.e. host-backed (vram or gmr), bo. If the display unit
 * is not a screen target, then MOBs shouldn't be available.
*/
if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
vmw_bo_placement_set(vfbbo->buffer,
VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
} else {
WARN_ON(update->dev_priv->has_mob);
vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
}
ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
} else {
struct vmw_framebuffer_surface *vfbs =
container_of(update->vfb, typeof(*vfbs), base);


@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -126,7 +126,6 @@ struct vmw_du_update_plane {
struct vmw_framebuffer *vfb;
struct vmw_fence_obj **out_fence;
struct mutex *mutex;
bool cpu_blit;
bool intr;
};
@@ -217,8 +216,6 @@ struct vmw_kms_dirty {
*/
struct vmw_framebuffer {
struct drm_framebuffer base;
int (*pin)(struct vmw_framebuffer *fb);
int (*unpin)(struct vmw_framebuffer *fb);
bool bo;
uint32_t user_handle;
};
@@ -233,7 +230,7 @@ struct vmw_clip_rect {
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
struct vmw_buffer_object *buffer;
struct vmw_bo *buffer;
struct list_head head;
bool is_bo_proxy; /* true if this is proxy surface for DMA buf */
};
@@ -241,7 +238,7 @@ struct vmw_framebuffer_surface {
struct vmw_framebuffer_bo {
struct vmw_framebuffer base;
struct vmw_buffer_object *buffer;
struct vmw_bo *buffer;
};
@@ -273,9 +270,7 @@ struct vmw_crtc_state {
};
struct vmw_cursor_plane_state {
struct ttm_buffer_object *bo;
struct ttm_bo_kmap_obj map;
bool mapped;
struct vmw_bo *bo;
s32 hotspot_x;
s32 hotspot_y;
};
@@ -293,7 +288,7 @@ struct vmw_cursor_plane_state {
struct vmw_plane_state {
struct drm_plane_state base;
struct vmw_surface *surf;
struct vmw_buffer_object *bo;
struct vmw_bo *bo;
int content_fb_type;
unsigned long bo_size;
@@ -346,7 +341,7 @@ struct vmw_connector_state {
struct vmw_cursor_plane {
struct drm_plane base;
struct ttm_buffer_object *cursor_mobs[3];
struct vmw_bo *cursor_mobs[3];
};
/**
@@ -364,7 +359,7 @@ struct vmw_display_unit {
struct vmw_cursor_plane cursor;
struct vmw_surface *cursor_surface;
struct vmw_buffer_object *cursor_bo;
struct vmw_bo *cursor_bo;
size_t cursor_age;
int cursor_x;
@@ -397,7 +392,7 @@ struct vmw_display_unit {
struct vmw_validation_ctx {
struct vmw_resource *res;
struct vmw_buffer_object *buf;
struct vmw_bo *buf;
};
#define vmw_crtc_to_du(x) \
@@ -458,7 +453,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
uint32_t num_clips);
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_buffer_object *bo,
struct vmw_bo *bo,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd2 *mode_cmd);
@@ -566,17 +561,15 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
unsigned num_clips, int inc,
struct vmw_fence_obj **out_fence,
struct drm_crtc *crtc);
int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct drm_clip_rect *clips,
struct drm_vmw_rect *vclips,
uint32_t num_clips,
int increment,
bool to_surface,
bool interruptible,
struct drm_crtc *crtc);
int vmw_kms_stdu_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct drm_clip_rect *clips,
struct drm_vmw_rect *vclips,
uint32_t num_clips,
int increment,
struct drm_crtc *crtc);
int vmw_du_helper_plane_update(struct vmw_du_update_plane *update);


@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
* Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,11 +25,13 @@
*
**************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_kms.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include "vmwgfx_kms.h"
#define vmw_crtc_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.crtc)
@@ -134,6 +136,47 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
return 0;
}
/*
* Pin the buffer in a location suitable for access by the
* display system.
*/
static int vmw_ldu_fb_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_bo *buf;
int ret;
buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
if (!buf)
return 0;
WARN_ON(dev_priv->active_display_unit != vmw_du_legacy);
if (dev_priv->active_display_unit == vmw_du_legacy) {
vmw_overlay_pause_all(dev_priv);
ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
vmw_overlay_resume_all(dev_priv);
} else
ret = -EINVAL;
return ret;
}
static int vmw_ldu_fb_unpin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_bo *buf;
buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
if (WARN_ON(!buf))
return 0;
return vmw_bo_unpin(dev_priv, buf, false);
}
static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
struct vmw_legacy_display_unit *ldu)
{
@@ -145,8 +188,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
list_del_init(&ldu->active);
if (--(ld->num_active) == 0) {
BUG_ON(!ld->fb);
if (ld->fb->unpin)
ld->fb->unpin(ld->fb);
WARN_ON(vmw_ldu_fb_unpin(ld->fb));
ld->fb = NULL;
}
@@ -163,11 +205,10 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
BUG_ON(!ld->num_active && ld->fb);
if (vfb != ld->fb) {
if (ld->fb && ld->fb->unpin)
ld->fb->unpin(ld->fb);
if (ld->fb)
WARN_ON(vmw_ldu_fb_unpin(ld->fb));
vmw_svga_enable(vmw_priv);
if (vfb->pin)
vfb->pin(vfb);
WARN_ON(vmw_ldu_fb_pin(vfb));
ld->fb = vfb;
}

Some files were not shown because too many files have changed in this diff.