From 0b2d86670a8438f6e98266a86ab5f407c6e2b0b6 Mon Sep 17 00:00:00 2001 From: Nicolas Frattaroli Date: Fri, 16 Jan 2026 13:57:30 +0100 Subject: [PATCH 001/158] drm/panthor: Rework panthor_irq::suspended into panthor_irq::state To deal with the threaded interrupt handler and a suspend action overlapping, the boolean panthor_irq::suspended is not sufficient. Rework it into taking several different values depending on the current state, and check it and set it within the IRQ helper functions. Co-developed-by: Boris Brezillon Signed-off-by: Boris Brezillon Signed-off-by: Nicolas Frattaroli Reviewed-by: Steven Price Reviewed-by: Boris Brezillon Link: https://patch.msgid.link/20260116-panthor-tracepoints-v10-1-d925986e3d1b@collabora.com --- drivers/gpu/drm/panthor/panthor_device.h | 35 +++++++++++++++++++----- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h index f35e52b9546a..8597b388cc40 100644 --- a/drivers/gpu/drm/panthor/panthor_device.h +++ b/drivers/gpu/drm/panthor/panthor_device.h @@ -61,6 +61,17 @@ enum panthor_device_pm_state { PANTHOR_DEVICE_PM_STATE_SUSPENDING, }; +enum panthor_irq_state { + /** @PANTHOR_IRQ_STATE_ACTIVE: IRQ is active and ready to process events. */ + PANTHOR_IRQ_STATE_ACTIVE = 0, + /** @PANTHOR_IRQ_STATE_PROCESSING: IRQ is currently processing events. */ + PANTHOR_IRQ_STATE_PROCESSING, + /** @PANTHOR_IRQ_STATE_SUSPENDED: IRQ is suspended. */ + PANTHOR_IRQ_STATE_SUSPENDED, + /** @PANTHOR_IRQ_STATE_SUSPENDING: IRQ is being suspended. */ + PANTHOR_IRQ_STATE_SUSPENDING, +}; + /** * struct panthor_irq - IRQ data * @@ -76,8 +87,8 @@ struct panthor_irq { /** @mask: Current mask being applied to xxx_INT_MASK. */ u32 mask; - /** @suspended: Set to true when the IRQ is suspended. */ - atomic_t suspended; + /** @state: one of &enum panthor_irq_state reflecting the current state. 
*/ + atomic_t state; }; /** @@ -409,12 +420,17 @@ static irqreturn_t panthor_ ## __name ## _irq_raw_handler(int irq, void *data) { \ struct panthor_irq *pirq = data; \ struct panthor_device *ptdev = pirq->ptdev; \ + enum panthor_irq_state old_state; \ \ - if (atomic_read(&pirq->suspended)) \ - return IRQ_NONE; \ if (!gpu_read(ptdev, __reg_prefix ## _INT_STAT)) \ return IRQ_NONE; \ \ + old_state = atomic_cmpxchg(&pirq->state, \ + PANTHOR_IRQ_STATE_ACTIVE, \ + PANTHOR_IRQ_STATE_PROCESSING); \ + if (old_state != PANTHOR_IRQ_STATE_ACTIVE) \ + return IRQ_NONE; \ + \ gpu_write(ptdev, __reg_prefix ## _INT_MASK, 0); \ return IRQ_WAKE_THREAD; \ } \ @@ -423,6 +439,7 @@ static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *da { \ struct panthor_irq *pirq = data; \ struct panthor_device *ptdev = pirq->ptdev; \ + enum panthor_irq_state old_state; \ irqreturn_t ret = IRQ_NONE; \ \ while (true) { \ @@ -435,7 +452,10 @@ static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *da ret = IRQ_HANDLED; \ } \ \ - if (!atomic_read(&pirq->suspended)) \ + old_state = atomic_cmpxchg(&pirq->state, \ + PANTHOR_IRQ_STATE_PROCESSING, \ + PANTHOR_IRQ_STATE_ACTIVE); \ + if (old_state == PANTHOR_IRQ_STATE_PROCESSING) \ gpu_write(ptdev, __reg_prefix ## _INT_MASK, pirq->mask); \ \ return ret; \ @@ -445,14 +465,15 @@ static inline void panthor_ ## __name ## _irq_suspend(struct panthor_irq *pirq) { \ pirq->mask = 0; \ gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, 0); \ + atomic_set(&pirq->state, PANTHOR_IRQ_STATE_SUSPENDING); \ synchronize_irq(pirq->irq); \ - atomic_set(&pirq->suspended, true); \ + atomic_set(&pirq->state, PANTHOR_IRQ_STATE_SUSPENDED); \ } \ \ static inline void panthor_ ## __name ## _irq_resume(struct panthor_irq *pirq, u32 mask) \ { \ - atomic_set(&pirq->suspended, false); \ pirq->mask = mask; \ + atomic_set(&pirq->state, PANTHOR_IRQ_STATE_ACTIVE); \ gpu_write(pirq->ptdev, __reg_prefix ## _INT_CLEAR, mask); \ gpu_write(pirq->ptdev, 
__reg_prefix ## _INT_MASK, mask); \ } \ From c5bf1d4e4473f0f18dfba0266a8fd48cb3700e73 Mon Sep 17 00:00:00 2001 From: Nicolas Frattaroli Date: Fri, 16 Jan 2026 13:57:31 +0100 Subject: [PATCH 002/158] drm/panthor: Extend IRQ helpers for mask modification/restoration The current IRQ helpers do not guarantee mutual exclusion that covers the entire transaction from accessing the mask member and modifying the mask register. This makes it hard, if not impossible, to implement mask modification helpers that may change one of these outside the normal suspend/resume/isr code paths. Add a spinlock to struct panthor_irq that protects both the mask member and register. Acquire it in all code paths that access these, but drop it before processing the threaded handler function. Then, add the aforementioned new helpers: enable_events, and disable_events. They work by ORing and NANDing the mask bits. resume is changed to no longer have a mask passed, as pirq->mask is supposed to be the user-requested mask now, rather than a mirror of the INT_MASK register contents. Users of the resume helper are adjusted accordingly, including a rather painful refactor in panthor_mmu.c. In panthor_mmu.c, the bespoke mask modification is excised, and replaced with enable_events/disable_events in as_enable/as_disable. 
Co-developed-by: Boris Brezillon Signed-off-by: Boris Brezillon Signed-off-by: Nicolas Frattaroli Reviewed-by: Steven Price Reviewed-by: Boris Brezillon Link: https://patch.msgid.link/20260116-panthor-tracepoints-v10-2-d925986e3d1b@collabora.com --- drivers/gpu/drm/panthor/panthor_device.h | 84 +++++++++++++++++++----- drivers/gpu/drm/panthor/panthor_fw.c | 3 +- drivers/gpu/drm/panthor/panthor_gpu.c | 2 +- drivers/gpu/drm/panthor/panthor_mmu.c | 47 ++++++------- drivers/gpu/drm/panthor/panthor_pwr.c | 2 +- 5 files changed, 97 insertions(+), 41 deletions(-) diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h index 8597b388cc40..b6696f73a536 100644 --- a/drivers/gpu/drm/panthor/panthor_device.h +++ b/drivers/gpu/drm/panthor/panthor_device.h @@ -84,9 +84,19 @@ struct panthor_irq { /** @irq: IRQ number. */ int irq; - /** @mask: Current mask being applied to xxx_INT_MASK. */ + /** @mask: Values to write to xxx_INT_MASK if active. */ u32 mask; + /** + * @mask_lock: protects modifications to _INT_MASK and @mask. + * + * In paths where _INT_MASK is updated based on a state + * transition/check, it's crucial for the state update/check to be + * inside the locked section, otherwise it introduces a race window + * leading to potential _INT_MASK inconsistencies. + */ + spinlock_t mask_lock; + /** @state: one of &enum panthor_irq_state reflecting the current state. 
*/ atomic_t state; }; @@ -425,6 +435,7 @@ static irqreturn_t panthor_ ## __name ## _irq_raw_handler(int irq, void *data) if (!gpu_read(ptdev, __reg_prefix ## _INT_STAT)) \ return IRQ_NONE; \ \ + guard(spinlock_irqsave)(&pirq->mask_lock); \ old_state = atomic_cmpxchg(&pirq->state, \ PANTHOR_IRQ_STATE_ACTIVE, \ PANTHOR_IRQ_STATE_PROCESSING); \ @@ -439,10 +450,17 @@ static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *da { \ struct panthor_irq *pirq = data; \ struct panthor_device *ptdev = pirq->ptdev; \ - enum panthor_irq_state old_state; \ irqreturn_t ret = IRQ_NONE; \ \ while (true) { \ + /* It's safe to access pirq->mask without the lock held here. If a new \ + * event gets added to the mask and the corresponding IRQ is pending, \ + * we'll process it right away instead of adding an extra raw -> threaded \ + * round trip. If an event is removed and the status bit is set, it will \ + * be ignored, just like it would have been if the mask had been adjusted \ + * right before the HW event kicks in. TLDR; it's all expected races we're \ + * covered for. 
\ + */ \ u32 status = gpu_read(ptdev, __reg_prefix ## _INT_RAWSTAT) & pirq->mask; \ \ if (!status) \ @@ -452,30 +470,36 @@ static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *da ret = IRQ_HANDLED; \ } \ \ - old_state = atomic_cmpxchg(&pirq->state, \ - PANTHOR_IRQ_STATE_PROCESSING, \ - PANTHOR_IRQ_STATE_ACTIVE); \ - if (old_state == PANTHOR_IRQ_STATE_PROCESSING) \ - gpu_write(ptdev, __reg_prefix ## _INT_MASK, pirq->mask); \ + scoped_guard(spinlock_irqsave, &pirq->mask_lock) { \ + enum panthor_irq_state old_state; \ + \ + old_state = atomic_cmpxchg(&pirq->state, \ + PANTHOR_IRQ_STATE_PROCESSING, \ + PANTHOR_IRQ_STATE_ACTIVE); \ + if (old_state == PANTHOR_IRQ_STATE_PROCESSING) \ + gpu_write(ptdev, __reg_prefix ## _INT_MASK, pirq->mask); \ + } \ \ return ret; \ } \ \ static inline void panthor_ ## __name ## _irq_suspend(struct panthor_irq *pirq) \ { \ - pirq->mask = 0; \ - gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, 0); \ - atomic_set(&pirq->state, PANTHOR_IRQ_STATE_SUSPENDING); \ + scoped_guard(spinlock_irqsave, &pirq->mask_lock) { \ + atomic_set(&pirq->state, PANTHOR_IRQ_STATE_SUSPENDING); \ + gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, 0); \ + } \ synchronize_irq(pirq->irq); \ atomic_set(&pirq->state, PANTHOR_IRQ_STATE_SUSPENDED); \ } \ \ -static inline void panthor_ ## __name ## _irq_resume(struct panthor_irq *pirq, u32 mask) \ +static inline void panthor_ ## __name ## _irq_resume(struct panthor_irq *pirq) \ { \ - pirq->mask = mask; \ + guard(spinlock_irqsave)(&pirq->mask_lock); \ + \ atomic_set(&pirq->state, PANTHOR_IRQ_STATE_ACTIVE); \ - gpu_write(pirq->ptdev, __reg_prefix ## _INT_CLEAR, mask); \ - gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, mask); \ + gpu_write(pirq->ptdev, __reg_prefix ## _INT_CLEAR, pirq->mask); \ + gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, pirq->mask); \ } \ \ static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev, \ @@ -484,13 +508,43 @@ static int panthor_request_ ## 
__name ## _irq(struct panthor_device *ptdev, \ { \ pirq->ptdev = ptdev; \ pirq->irq = irq; \ - panthor_ ## __name ## _irq_resume(pirq, mask); \ + pirq->mask = mask; \ + spin_lock_init(&pirq->mask_lock); \ + panthor_ ## __name ## _irq_resume(pirq); \ \ return devm_request_threaded_irq(ptdev->base.dev, irq, \ panthor_ ## __name ## _irq_raw_handler, \ panthor_ ## __name ## _irq_threaded_handler, \ IRQF_SHARED, KBUILD_MODNAME "-" # __name, \ pirq); \ +} \ + \ +static inline void panthor_ ## __name ## _irq_enable_events(struct panthor_irq *pirq, u32 mask) \ +{ \ + guard(spinlock_irqsave)(&pirq->mask_lock); \ + pirq->mask |= mask; \ + \ + /* The only situation where we need to write the new mask is if the IRQ is active. \ + * If it's being processed, the mask will be restored for us in _irq_threaded_handler() \ + * on the PROCESSING -> ACTIVE transition. \ + * If the IRQ is suspended/suspending, the mask is restored at resume time. \ + */ \ + if (atomic_read(&pirq->state) == PANTHOR_IRQ_STATE_ACTIVE) \ + gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, pirq->mask); \ +} \ + \ +static inline void panthor_ ## __name ## _irq_disable_events(struct panthor_irq *pirq, u32 mask)\ +{ \ + guard(spinlock_irqsave)(&pirq->mask_lock); \ + pirq->mask &= ~mask; \ + \ + /* The only situation where we need to write the new mask is if the IRQ is active. \ + * If it's being processed, the mask will be restored for us in _irq_threaded_handler() \ + * on the PROCESSING -> ACTIVE transition. \ + * If the IRQ is suspended/suspending, the mask is restored at resume time. 
\ + */ \ + if (atomic_read(&pirq->state) == PANTHOR_IRQ_STATE_ACTIVE) \ + gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, pirq->mask); \ } extern struct workqueue_struct *panthor_cleanup_wq; diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index a64ec8756bed..0e46625f7621 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -1080,7 +1080,8 @@ static int panthor_fw_start(struct panthor_device *ptdev) bool timedout = false; ptdev->fw->booted = false; - panthor_job_irq_resume(&ptdev->fw->irq, ~0); + panthor_job_irq_enable_events(&ptdev->fw->irq, ~0); + panthor_job_irq_resume(&ptdev->fw->irq); gpu_write(ptdev, MCU_CONTROL, MCU_CONTROL_AUTO); if (!wait_event_timeout(ptdev->fw->req_waitqueue, diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c index 057e167468d0..9304469a711a 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.c +++ b/drivers/gpu/drm/panthor/panthor_gpu.c @@ -395,7 +395,7 @@ void panthor_gpu_suspend(struct panthor_device *ptdev) */ void panthor_gpu_resume(struct panthor_device *ptdev) { - panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK); + panthor_gpu_irq_resume(&ptdev->gpu->irq); panthor_hw_l2_power_on(ptdev); } diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index b888fff05efe..912c833b4980 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -562,9 +562,21 @@ static u64 pack_region_range(struct panthor_device *ptdev, u64 *region_start, u6 return region_width | *region_start; } +static u32 panthor_mmu_as_fault_mask(struct panthor_device *ptdev, u32 as) +{ + return BIT(as); +} + +/* Forward declaration to call helpers within as_enable/disable */ +static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status); +PANTHOR_IRQ_HANDLER(mmu, MMU, panthor_mmu_irq_handler); + static int panthor_mmu_as_enable(struct panthor_device 
*ptdev, u32 as_nr, u64 transtab, u64 transcfg, u64 memattr) { + panthor_mmu_irq_enable_events(&ptdev->mmu->irq, + panthor_mmu_as_fault_mask(ptdev, as_nr)); + gpu_write64(ptdev, AS_TRANSTAB(as_nr), transtab); gpu_write64(ptdev, AS_MEMATTR(as_nr), memattr); gpu_write64(ptdev, AS_TRANSCFG(as_nr), transcfg); @@ -580,6 +592,9 @@ static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr, lockdep_assert_held(&ptdev->mmu->as.slots_lock); + panthor_mmu_irq_disable_events(&ptdev->mmu->irq, + panthor_mmu_as_fault_mask(ptdev, as_nr)); + /* Flush+invalidate RW caches, invalidate RO ones. */ ret = panthor_gpu_flush_caches(ptdev, CACHE_CLEAN | CACHE_INV, CACHE_CLEAN | CACHE_INV, CACHE_INV); @@ -612,11 +627,6 @@ static u32 panthor_mmu_fault_mask(struct panthor_device *ptdev, u32 value) return value & GENMASK(15, 0); } -static u32 panthor_mmu_as_fault_mask(struct panthor_device *ptdev, u32 as) -{ - return BIT(as); -} - /** * panthor_vm_has_unhandled_faults() - Check if a VM has unhandled faults * @vm: VM to check. @@ -670,6 +680,7 @@ int panthor_vm_active(struct panthor_vm *vm) struct io_pgtable_cfg *cfg = &io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg; int ret = 0, as, cookie; u64 transtab, transcfg; + u32 fault_mask; if (!drm_dev_enter(&ptdev->base, &cookie)) return -ENODEV; @@ -743,14 +754,13 @@ int panthor_vm_active(struct panthor_vm *vm) /* If the VM is re-activated, we clear the fault. */ vm->unhandled_fault = false; - /* Unhandled pagefault on this AS, clear the fault and re-enable interrupts - * before enabling the AS. + /* Unhandled pagefault on this AS, clear the fault and enable the AS, + * which re-enables interrupts. 
*/ - if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) { - gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as)); - ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as); - ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as); - gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask); + fault_mask = panthor_mmu_as_fault_mask(ptdev, as); + if (ptdev->mmu->as.faulty_mask & fault_mask) { + gpu_write(ptdev, MMU_INT_CLEAR, fault_mask); + ptdev->mmu->as.faulty_mask &= ~fault_mask; } /* The VM update is guarded by ::op_lock, which we take at the beginning @@ -1708,7 +1718,6 @@ static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status) while (status) { u32 as = ffs(status | (status >> 16)) - 1; u32 mask = panthor_mmu_as_fault_mask(ptdev, as); - u32 new_int_mask; u64 addr; u32 fault_status; u32 exception_type; @@ -1726,8 +1735,6 @@ static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status) mutex_lock(&ptdev->mmu->as.slots_lock); ptdev->mmu->as.faulty_mask |= mask; - new_int_mask = - panthor_mmu_fault_mask(ptdev, ~ptdev->mmu->as.faulty_mask); /* terminal fault, print info about the fault */ drm_err(&ptdev->base, @@ -1751,11 +1758,6 @@ static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status) */ gpu_write(ptdev, MMU_INT_CLEAR, mask); - /* Ignore MMU interrupts on this AS until it's been - * re-enabled. 
- */ - ptdev->mmu->irq.mask = new_int_mask; - if (ptdev->mmu->as.slots[as].vm) ptdev->mmu->as.slots[as].vm->unhandled_fault = true; @@ -1770,7 +1772,6 @@ static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status) if (has_unhandled_faults) panthor_sched_report_mmu_fault(ptdev); } -PANTHOR_IRQ_HANDLER(mmu, MMU, panthor_mmu_irq_handler); /** * panthor_mmu_suspend() - Suspend the MMU logic @@ -1815,7 +1816,7 @@ void panthor_mmu_resume(struct panthor_device *ptdev) ptdev->mmu->as.faulty_mask = 0; mutex_unlock(&ptdev->mmu->as.slots_lock); - panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0)); + panthor_mmu_irq_resume(&ptdev->mmu->irq); } /** @@ -1869,7 +1870,7 @@ void panthor_mmu_post_reset(struct panthor_device *ptdev) mutex_unlock(&ptdev->mmu->as.slots_lock); - panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0)); + panthor_mmu_irq_resume(&ptdev->mmu->irq); /* Restart the VM_BIND queues. */ mutex_lock(&ptdev->mmu->vm.lock); diff --git a/drivers/gpu/drm/panthor/panthor_pwr.c b/drivers/gpu/drm/panthor/panthor_pwr.c index 57cfc7ce715b..ed3b2b4479ca 100644 --- a/drivers/gpu/drm/panthor/panthor_pwr.c +++ b/drivers/gpu/drm/panthor/panthor_pwr.c @@ -545,5 +545,5 @@ void panthor_pwr_resume(struct panthor_device *ptdev) if (!ptdev->pwr) return; - panthor_pwr_irq_resume(&ptdev->pwr->irq, PWR_INTERRUPTS_MASK); + panthor_pwr_irq_resume(&ptdev->pwr->irq); } From 52ebfd8d2feb1f37bc75c6b662b620323de676ea Mon Sep 17 00:00:00 2001 From: Nicolas Frattaroli Date: Fri, 16 Jan 2026 13:57:32 +0100 Subject: [PATCH 003/158] drm/panthor: Add tracepoint for hardware utilisation changes Mali GPUs have three registers that indicate which parts of the hardware are powered at any moment. These take the form of bitmaps. In the case of SHADER_READY for example, a high bit indicates that the shader core corresponding to that bit index is powered on. 
These bitmaps aren't solely contiguous bits, as it's common to have holes in the sequence of shader core indices, and the actual set of which cores are present is defined by the "shader present" register. When the GPU finishes a power state transition, it fires a GPU_IRQ_POWER_CHANGED_ALL interrupt. After such an interrupt is received, the _READY registers will contain new interesting data. During power transitions, the GPU_IRQ_POWER_CHANGED interrupt will fire, and the registers will likewise contain potentially changed data. This is not to be confused with the PWR_IRQ_POWER_CHANGED_ALL interrupt, which is something related to Mali v14+'s power control logic. The _READY registers and corresponding interrupts are already available in v9 and onwards. Expose the data as a tracepoint to userspace. This allows users to debug various scenarios and gather interesting information, such as: knowing how much hardware is lit up at any given time, correlating graphics corruption with a specific powered shader core, measuring when hardware is allowed to go to a powered off state again, and so on. The registration/unregistration functions for the tracepoint go through a wrapper in panthor_hw.c, so that v14+ can implement the same tracepoint by adding its hardware specific IRQ on/off callbacks to the panthor_hw.ops member. 
Signed-off-by: Nicolas Frattaroli Reviewed-by: Steven Price Reviewed-by: Boris Brezillon Link: https://patch.msgid.link/20260116-panthor-tracepoints-v10-3-d925986e3d1b@collabora.com Signed-off-by: Boris Brezillon --- drivers/gpu/drm/panthor/panthor_gpu.c | 28 +++++++++++ drivers/gpu/drm/panthor/panthor_gpu.h | 2 + drivers/gpu/drm/panthor/panthor_hw.c | 62 +++++++++++++++++++++++++ drivers/gpu/drm/panthor/panthor_hw.h | 8 ++++ drivers/gpu/drm/panthor/panthor_trace.h | 58 +++++++++++++++++++++++ 5 files changed, 158 insertions(+) create mode 100644 drivers/gpu/drm/panthor/panthor_trace.h diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c index 9304469a711a..2ab444ee8c71 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.c +++ b/drivers/gpu/drm/panthor/panthor_gpu.c @@ -22,6 +22,9 @@ #include "panthor_hw.h" #include "panthor_regs.h" +#define CREATE_TRACE_POINTS +#include "panthor_trace.h" + /** * struct panthor_gpu - GPU block management data. */ @@ -48,6 +51,9 @@ struct panthor_gpu { GPU_IRQ_RESET_COMPLETED | \ GPU_IRQ_CLEAN_CACHES_COMPLETED) +#define GPU_POWER_INTERRUPTS_MASK \ + (GPU_IRQ_POWER_CHANGED | GPU_IRQ_POWER_CHANGED_ALL) + static void panthor_gpu_coherency_set(struct panthor_device *ptdev) { gpu_write(ptdev, GPU_COHERENCY_PROTOCOL, @@ -80,6 +86,12 @@ static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status) { gpu_write(ptdev, GPU_INT_CLEAR, status); + if (tracepoint_enabled(gpu_power_status) && (status & GPU_POWER_INTERRUPTS_MASK)) + trace_gpu_power_status(ptdev->base.dev, + gpu_read64(ptdev, SHADER_READY), + gpu_read64(ptdev, TILER_READY), + gpu_read64(ptdev, L2_READY)); + if (status & GPU_IRQ_FAULT) { u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS); u64 address = gpu_read64(ptdev, GPU_FAULT_ADDR); @@ -157,6 +169,22 @@ int panthor_gpu_init(struct panthor_device *ptdev) return 0; } +int panthor_gpu_power_changed_on(struct panthor_device *ptdev) +{ + guard(pm_runtime_active)(ptdev->base.dev); + 
+ panthor_gpu_irq_enable_events(&ptdev->gpu->irq, GPU_POWER_INTERRUPTS_MASK); + + return 0; +} + +void panthor_gpu_power_changed_off(struct panthor_device *ptdev) +{ + guard(pm_runtime_active)(ptdev->base.dev); + + panthor_gpu_irq_disable_events(&ptdev->gpu->irq, GPU_POWER_INTERRUPTS_MASK); +} + /** * panthor_gpu_block_power_off() - Power-off a specific block of the GPU * @ptdev: Device. diff --git a/drivers/gpu/drm/panthor/panthor_gpu.h b/drivers/gpu/drm/panthor/panthor_gpu.h index 12e66f48ced1..12c263a39928 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.h +++ b/drivers/gpu/drm/panthor/panthor_gpu.h @@ -51,5 +51,7 @@ int panthor_gpu_l2_power_on(struct panthor_device *ptdev); int panthor_gpu_flush_caches(struct panthor_device *ptdev, u32 l2, u32 lsc, u32 other); int panthor_gpu_soft_reset(struct panthor_device *ptdev); +void panthor_gpu_power_changed_off(struct panthor_device *ptdev); +int panthor_gpu_power_changed_on(struct panthor_device *ptdev); #endif diff --git a/drivers/gpu/drm/panthor/panthor_hw.c b/drivers/gpu/drm/panthor/panthor_hw.c index 80c521784cd3..d135aa6724fa 100644 --- a/drivers/gpu/drm/panthor/panthor_hw.c +++ b/drivers/gpu/drm/panthor/panthor_hw.c @@ -2,6 +2,8 @@ /* Copyright 2025 ARM Limited. All rights reserved. 
*/ #include +#include + #include #include "panthor_device.h" @@ -30,6 +32,8 @@ static struct panthor_hw panthor_hw_arch_v10 = { .soft_reset = panthor_gpu_soft_reset, .l2_power_off = panthor_gpu_l2_power_off, .l2_power_on = panthor_gpu_l2_power_on, + .power_changed_off = panthor_gpu_power_changed_off, + .power_changed_on = panthor_gpu_power_changed_on, }, }; @@ -54,6 +58,64 @@ static struct panthor_hw_entry panthor_hw_match[] = { }, }; +static int panthor_hw_set_power_tracing(struct device *dev, void *data) +{ + struct panthor_device *ptdev = dev_get_drvdata(dev); + + if (!ptdev) + return -ENODEV; + + if (!ptdev->hw) + return 0; + + if (data) { + if (ptdev->hw->ops.power_changed_on) + return ptdev->hw->ops.power_changed_on(ptdev); + } else { + if (ptdev->hw->ops.power_changed_off) + ptdev->hw->ops.power_changed_off(ptdev); + } + + return 0; +} + +int panthor_hw_power_status_register(void) +{ + struct device_driver *drv; + int ret; + + drv = driver_find("panthor", &platform_bus_type); + if (!drv) + return -ENODEV; + + ret = driver_for_each_device(drv, NULL, (void *)true, + panthor_hw_set_power_tracing); + + return ret; +} + +void panthor_hw_power_status_unregister(void) +{ + struct device_driver *drv; + int ret; + + drv = driver_find("panthor", &platform_bus_type); + if (!drv) + return; + + ret = driver_for_each_device(drv, NULL, NULL, panthor_hw_set_power_tracing); + + /* + * Ideally, it'd be possible to ask driver_for_each_device to hand us + * another "start" to keep going after the failing device, but it + * doesn't do that. Minor inconvenience in what is probably a bad day + * on the computer already though. 
+ */ + if (ret) + pr_warn("Couldn't mask power IRQ for at least one device: %pe\n", + ERR_PTR(ret)); +} + static char *get_gpu_model_name(struct panthor_device *ptdev) { const u32 gpu_id = ptdev->gpu_info.gpu_id; diff --git a/drivers/gpu/drm/panthor/panthor_hw.h b/drivers/gpu/drm/panthor/panthor_hw.h index 56c68c1e9c26..2c28aea82841 100644 --- a/drivers/gpu/drm/panthor/panthor_hw.h +++ b/drivers/gpu/drm/panthor/panthor_hw.h @@ -19,6 +19,12 @@ struct panthor_hw_ops { /** @l2_power_on: L2 power on function pointer */ int (*l2_power_on)(struct panthor_device *ptdev); + + /** @power_changed_on: Start listening to power change IRQs */ + int (*power_changed_on)(struct panthor_device *ptdev); + + /** @power_changed_off: Stop listening to power change IRQs */ + void (*power_changed_off)(struct panthor_device *ptdev); }; /** @@ -32,6 +38,8 @@ struct panthor_hw { }; int panthor_hw_init(struct panthor_device *ptdev); +int panthor_hw_power_status_register(void); +void panthor_hw_power_status_unregister(void); static inline int panthor_hw_soft_reset(struct panthor_device *ptdev) { diff --git a/drivers/gpu/drm/panthor/panthor_trace.h b/drivers/gpu/drm/panthor/panthor_trace.h new file mode 100644 index 000000000000..5bd420894745 --- /dev/null +++ b/drivers/gpu/drm/panthor/panthor_trace.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 or MIT */ +/* Copyright 2025 Collabora ltd. 
*/ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM panthor + +#if !defined(__PANTHOR_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ) +#define __PANTHOR_TRACE_H__ + +#include +#include + +#include "panthor_hw.h" + +/** + * gpu_power_status - called whenever parts of GPU hardware are turned on or off + * @dev: pointer to the &struct device, for printing the device name + * @shader_bitmap: bitmap where a high bit indicates the shader core at a given + * bit index is on, and a low bit indicates a shader core is + * either powered off or absent + * @tiler_bitmap: bitmap where a high bit indicates the tiler unit at a given + * bit index is on, and a low bit indicates a tiler unit is + * either powered off or absent + * @l2_bitmap: bitmap where a high bit indicates the L2 cache at a given bit + * index is on, and a low bit indicates the L2 cache is either + * powered off or absent + */ +TRACE_EVENT_FN(gpu_power_status, + TP_PROTO(const struct device *dev, u64 shader_bitmap, u64 tiler_bitmap, + u64 l2_bitmap), + TP_ARGS(dev, shader_bitmap, tiler_bitmap, l2_bitmap), + TP_STRUCT__entry( + __string(dev_name, dev_name(dev)) + __field(u64, shader_bitmap) + __field(u64, tiler_bitmap) + __field(u64, l2_bitmap) + ), + TP_fast_assign( + __assign_str(dev_name); + __entry->shader_bitmap = shader_bitmap; + __entry->tiler_bitmap = tiler_bitmap; + __entry->l2_bitmap = l2_bitmap; + ), + TP_printk("%s: shader_bitmap=0x%llx tiler_bitmap=0x%llx l2_bitmap=0x%llx", + __get_str(dev_name), __entry->shader_bitmap, __entry->tiler_bitmap, + __entry->l2_bitmap + ), + panthor_hw_power_status_register, panthor_hw_power_status_unregister +); + +#endif /* __PANTHOR_TRACE_H__ */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE panthor_trace + +#include From 15bd2f5d52de890f745ac0c60a44cd27d095bb0d Mon Sep 17 00:00:00 2001 From: Nicolas Frattaroli Date: Fri, 16 Jan 2026 13:57:33 +0100 Subject: [PATCH 004/158] drm/panthor: Add gpu_job_irq tracepoint Mali's CSF firmware triggers the job IRQ whenever there's new firmware events for processing. While this can be a global event (BIT(31) of the status register), it's usually an event relating to a command stream group (the other bit indices). Panthor throws these events onto a workqueue for processing outside the IRQ handler. It's therefore useful to have an instrumented tracepoint that goes beyond the generic IRQ tracepoint for this specific case, as it can be augmented with additional data, namely the events bit mask. This can then be used to debug problems relating to GPU jobs events not being processed quickly enough. The duration_ns field can be used to work backwards from when the tracepoint fires (at the end of the IRQ handler) to figure out when the interrupt itself landed, providing not just information on how long the work queueing took, but also when the actual interrupt itself arrived. With this information in hand, the IRQ handler itself being slow can be excluded as a possible source of problems, and attention can be directed to the workqueue processing instead. 
Signed-off-by: Nicolas Frattaroli Reviewed-by: Steven Price Reviewed-by: Boris Brezillon Link: https://patch.msgid.link/20260116-panthor-tracepoints-v10-4-d925986e3d1b@collabora.com Signed-off-by: Boris Brezillon --- drivers/gpu/drm/panthor/panthor_fw.c | 13 ++++++++++++ drivers/gpu/drm/panthor/panthor_trace.h | 28 +++++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index 0e46625f7621..5a904ca64525 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -26,6 +26,7 @@ #include "panthor_mmu.h" #include "panthor_regs.h" #include "panthor_sched.h" +#include "panthor_trace.h" #define CSF_FW_NAME "mali_csffw.bin" @@ -1060,6 +1061,12 @@ static void panthor_fw_init_global_iface(struct panthor_device *ptdev) static void panthor_job_irq_handler(struct panthor_device *ptdev, u32 status) { + u32 duration; + u64 start = 0; + + if (tracepoint_enabled(gpu_job_irq)) + start = ktime_get_ns(); + gpu_write(ptdev, JOB_INT_CLEAR, status); if (!ptdev->fw->booted && (status & JOB_INT_GLOBAL_IF)) @@ -1072,6 +1079,12 @@ static void panthor_job_irq_handler(struct panthor_device *ptdev, u32 status) return; panthor_sched_report_fw_events(ptdev, status); + + if (tracepoint_enabled(gpu_job_irq) && start) { + if (check_sub_overflow(ktime_get_ns(), start, &duration)) + duration = U32_MAX; + trace_gpu_job_irq(ptdev->base.dev, status, duration); + } } PANTHOR_IRQ_HANDLER(job, JOB, panthor_job_irq_handler); diff --git a/drivers/gpu/drm/panthor/panthor_trace.h b/drivers/gpu/drm/panthor/panthor_trace.h index 5bd420894745..6ffeb4fe6599 100644 --- a/drivers/gpu/drm/panthor/panthor_trace.h +++ b/drivers/gpu/drm/panthor/panthor_trace.h @@ -48,6 +48,34 @@ TRACE_EVENT_FN(gpu_power_status, panthor_hw_power_status_register, panthor_hw_power_status_unregister ); +/** + * gpu_job_irq - called after a job interrupt from firmware completes + * @dev: pointer to the &struct 
device, for printing the device name + * @events: bitmask of BIT(CSG id) | BIT(31) for a global event + * @duration_ns: Nanoseconds between job IRQ handler entry and exit + * + * The panthor_job_irq_handler() function instrumented by this tracepoint exits + * once it has queued the firmware interrupts for processing, not when the + * firmware interrupts are fully processed. This tracepoint allows for debugging + * issues with delays in the workqueue's processing of events. + */ +TRACE_EVENT(gpu_job_irq, + TP_PROTO(const struct device *dev, u32 events, u32 duration_ns), + TP_ARGS(dev, events, duration_ns), + TP_STRUCT__entry( + __string(dev_name, dev_name(dev)) + __field(u32, events) + __field(u32, duration_ns) + ), + TP_fast_assign( + __assign_str(dev_name); + __entry->events = events; + __entry->duration_ns = duration_ns; + ), + TP_printk("%s: events=0x%x duration_ns=%d", __get_str(dev_name), + __entry->events, __entry->duration_ns) +); + #endif /* __PANTHOR_TRACE_H__ */ #undef TRACE_INCLUDE_PATH From f08f665f8cda9520d98ee24545d306a92f386616 Mon Sep 17 00:00:00 2001 From: Nicolas Frattaroli Date: Fri, 23 Jan 2026 14:11:44 +0100 Subject: [PATCH 005/158] drm/panthor: Add src path to includes of panthor_gpu.o The file that defines CREATE_TRACE_POINTS needs to have the src directory added to its include paths, or else the build may fail, as define_trace.h won't be able to find the included trace file. Add it to the Makefile's CFLAGS for panthor_gpu.o. 
Fixes: 52ebfd8d2feb ("drm/panthor: Add tracepoint for hardware utilisation changes") Reported-by: Sasha Levin Closes: https://lore.kernel.org/r/aXLyzd6pMmexwWlY@laps/ Signed-off-by: Nicolas Frattaroli Reviewed-by: Liviu Dudau Reviewed-by: Steven Price Reviewed-by: Boris Brezillon Link: https://patch.msgid.link/20260123-panthor-tracepoint-build-fix-v1-1-7d67b7c0ae9b@collabora.com Signed-off-by: Boris Brezillon --- drivers/gpu/drm/panthor/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/panthor/Makefile b/drivers/gpu/drm/panthor/Makefile index 753a32c446df..dd15d52a88ba 100644 --- a/drivers/gpu/drm/panthor/Makefile +++ b/drivers/gpu/drm/panthor/Makefile @@ -14,3 +14,5 @@ panthor-y := \ panthor_sched.o obj-$(CONFIG_DRM_PANTHOR) += panthor.o + +CFLAGS_panthor_gpu.o := -I$(src) From 2cb217301e0df17f7107a1b0941b28d4053eae8b Mon Sep 17 00:00:00 2001 From: Terry Hsiao Date: Thu, 22 Jan 2026 11:29:04 +0800 Subject: [PATCH 006/158] drm/panel-edp: Add AUO B116XAT04.1 (HW: 1A) Add support for the AUO - B116XAT04.1 (HW: 1A) panel. 
This panel is used on MT8186 Chromebooks The raw EDID: 00 ff ff ff ff ff ff 00 06 af ba 89 00 00 00 00 0c 23 01 04 95 1a 0e 78 02 9e a5 96 59 58 96 28 1b 50 54 00 00 00 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 ce 1d 56 ea 50 00 1a 30 30 20 46 00 00 90 10 00 00 18 df 13 56 ea 50 00 1a 30 30 20 46 00 00 90 10 00 00 18 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 02 00 10 48 ff 0f 3c 7d 0c 0a 2a 7d 20 20 20 00 21 Signed-off-by: Terry Hsiao Reviewed-by: Douglas Anderson Signed-off-by: Douglas Anderson Link: https://patch.msgid.link/20260122032904.3933-1-terry_hsiao@compal.corp-partner.google.com --- drivers/gpu/drm/panel/panel-edp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 679f4af5246d..108569490ed5 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1915,6 +1915,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x723c, &delay_200_500_e50, "B140XTN07.2"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x73aa, &delay_200_500_e50, "B116XTN02.3"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x89ba, &delay_200_500_e50, "B116XAT04.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x8bba, &delay_200_500_e50, "B140UAN08.5"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xa7b3, &delay_200_500_e50, "B140UAN04.4"), From 68e28facbc8ab3e701e1814323d397a75b400865 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= Date: Thu, 15 Jan 2026 09:50:48 -0300 Subject: [PATCH 007/158] drm/panel: ilitek-ili9882t: Select DRM_DISPLAY_DSC_HELPER MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The panel-ilitek-ili9882t driver uses drm_dsc_pps_payload_pack() which is provided by the DRM_DISPLAY_DSC_HELPER. 
Add the missing Kconfig select to fix the following build error: ERROR: modpost: "drm_dsc_pps_payload_pack" [drivers/gpu/drm/panel/panel-ilitek-ili9882t.ko] undefined! Fixes: 65ce1f5834e9 ("drm/panel: ilitek-ili9882t: Switch Tianma TL121BVMS07 to DSC 120Hz mode") Reviewed-by: Neil Armstrong Tested-by: Nicolas Frattaroli Reviewed-by: Nicolas Frattaroli Link: https://patch.msgid.link/20260115125136.64866-1-mcanal@igalia.com Signed-off-by: Maíra Canal --- drivers/gpu/drm/panel/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index a0fe6069e5e4..ae25f003aa33 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -280,6 +280,7 @@ config DRM_PANEL_ILITEK_ILI9882T depends on OF depends on DRM_MIPI_DSI depends on BACKLIGHT_CLASS_DEVICE + select DRM_DISPLAY_DSC_HELPER help Say Y if you want to enable support for panels based on the Ilitek ILI9882t controller. From ef246da8e63c486780dca4d9b4d79589cbebf5e5 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Sat, 24 Jan 2026 21:14:13 +0200 Subject: [PATCH 008/158] dma-buf: Rename .move_notify() callback to a clearer identifier MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename the .move_notify() callback to .invalidate_mappings() to make its purpose explicit and highlight that it is responsible for invalidating existing mappings. 
Suggested-by: Christian König Reviewed-by: Christian König Signed-off-by: Leon Romanovsky Link: https://lore.kernel.org/r/20260124-dmabuf-revoke-v5-1-f98fca917e96@nvidia.com Signed-off-by: Christian König --- drivers/dma-buf/dma-buf.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 4 ++-- drivers/gpu/drm/virtio/virtgpu_prime.c | 2 +- drivers/gpu/drm/xe/tests/xe_dma_buf.c | 6 +++--- drivers/gpu/drm/xe/xe_dma_buf.c | 2 +- drivers/infiniband/core/umem_dmabuf.c | 4 ++-- drivers/infiniband/hw/mlx5/mr.c | 2 +- drivers/iommu/iommufd/pages.c | 2 +- include/linux/dma-buf.h | 6 +++--- 9 files changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 77555096e4c7..cc9b88214d97 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -1017,7 +1017,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, if (WARN_ON(!dmabuf || !dev)) return ERR_PTR(-EINVAL); - if (WARN_ON(importer_ops && !importer_ops->move_notify)) + if (WARN_ON(importer_ops && !importer_ops->invalidate_mappings)) return ERR_PTR(-EINVAL); attach = kzalloc(sizeof(*attach), GFP_KERNEL); @@ -1130,7 +1130,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF"); * * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move * any mapping of @attach again and inform the importer through - * &dma_buf_attach_ops.move_notify. + * &dma_buf_attach_ops.invalidate_mappings. 
*/ void dma_buf_unpin(struct dma_buf_attachment *attach) { @@ -1338,7 +1338,7 @@ void dma_buf_move_notify(struct dma_buf *dmabuf) list_for_each_entry(attach, &dmabuf->attachments, node) if (attach->importer_ops) - attach->importer_ops->move_notify(attach); + attach->importer_ops->invalidate_mappings(attach); } EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index e22cfa7c6d32..863454148b28 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -450,7 +450,7 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf) } /** - * amdgpu_dma_buf_move_notify - &attach.move_notify implementation + * amdgpu_dma_buf_move_notify - &attach.invalidate_mappings implementation * * @attach: the DMA-buf attachment * @@ -521,7 +521,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach) static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = { .allow_peer2peer = true, - .move_notify = amdgpu_dma_buf_move_notify + .invalidate_mappings = amdgpu_dma_buf_move_notify }; /** diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index ce49282198cb..19c78dd2ca77 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -288,7 +288,7 @@ static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach) static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = { .allow_peer2peer = true, - .move_notify = virtgpu_dma_buf_move_notify + .invalidate_mappings = virtgpu_dma_buf_move_notify }; struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev, diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c index 5df98de5ba3c..1f2cca5c2f81 100644 --- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c @@ -23,7 +23,7 @@ 
static bool p2p_enabled(struct dma_buf_test_params *params) static bool is_dynamic(struct dma_buf_test_params *params) { return IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY) && params->attach_ops && - params->attach_ops->move_notify; + params->attach_ops->invalidate_mappings; } static void check_residency(struct kunit *test, struct xe_bo *exported, @@ -60,7 +60,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported, /* * Evict exporter. Evicting the exported bo will - * evict also the imported bo through the move_notify() functionality if + * evict also the imported bo through the invalidate_mappings() functionality if * importer is on a different device. If they're on the same device, * the exporter and the importer should be the same bo. */ @@ -198,7 +198,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe) static const struct dma_buf_attach_ops nop2p_attach_ops = { .allow_peer2peer = false, - .move_notify = xe_dma_buf_move_notify + .invalidate_mappings = xe_dma_buf_move_notify }; /* diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c index 54e42960daad..2e167b29d0c9 100644 --- a/drivers/gpu/drm/xe/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/xe_dma_buf.c @@ -287,7 +287,7 @@ static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach) static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = { .allow_peer2peer = true, - .move_notify = xe_dma_buf_move_notify + .invalidate_mappings = xe_dma_buf_move_notify }; #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c index 0ec2e4120cc9..d77a739cfe7a 100644 --- a/drivers/infiniband/core/umem_dmabuf.c +++ b/drivers/infiniband/core/umem_dmabuf.c @@ -129,7 +129,7 @@ ib_umem_dmabuf_get_with_dma_device(struct ib_device *device, if (check_add_overflow(offset, (unsigned long)size, &end)) return ret; - if (unlikely(!ops || !ops->move_notify)) + if (unlikely(!ops || 
!ops->invalidate_mappings)) return ret; dmabuf = dma_buf_get(fd); @@ -195,7 +195,7 @@ ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach) static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = { .allow_peer2peer = true, - .move_notify = ib_umem_dmabuf_unsupported_move_notify, + .invalidate_mappings = ib_umem_dmabuf_unsupported_move_notify, }; struct ib_umem_dmabuf * diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 325fa04cbe8a..97099d3b1688 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1620,7 +1620,7 @@ static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach) static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = { .allow_peer2peer = 1, - .move_notify = mlx5_ib_dmabuf_invalidate_cb, + .invalidate_mappings = mlx5_ib_dmabuf_invalidate_cb, }; static struct ib_mr * diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c index dbe51ecb9a20..76f900fa1687 100644 --- a/drivers/iommu/iommufd/pages.c +++ b/drivers/iommu/iommufd/pages.c @@ -1451,7 +1451,7 @@ static void iopt_revoke_notify(struct dma_buf_attachment *attach) static struct dma_buf_attach_ops iopt_dmabuf_attach_revoke_ops = { .allow_peer2peer = true, - .move_notify = iopt_revoke_notify, + .invalidate_mappings = iopt_revoke_notify, }; /* diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 91f4939db89b..d9ee4499b37d 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -407,7 +407,7 @@ struct dma_buf { * through the device. * * - Dynamic importers should set fences for any access that they can't - * disable immediately from their &dma_buf_attach_ops.move_notify + * disable immediately from their &dma_buf_attach_ops.invalidate_mappings * callback. 
* * IMPORTANT: @@ -446,7 +446,7 @@ struct dma_buf_attach_ops { bool allow_peer2peer; /** - * @move_notify: [optional] notification that the DMA-buf is moving + * @invalidate_mappings: [optional] notification that the DMA-buf is moving * * If this callback is provided the framework can avoid pinning the * backing store while mappings exists. @@ -463,7 +463,7 @@ struct dma_buf_attach_ops { * New mappings can be created after this callback returns, and will * point to the new location of the DMA-buf. */ - void (*move_notify)(struct dma_buf_attachment *attach); + void (*invalidate_mappings)(struct dma_buf_attachment *attach); }; /** From 95308225e5baeaae1e313816059c59a0036ab6b2 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Sat, 24 Jan 2026 21:14:14 +0200 Subject: [PATCH 009/158] dma-buf: Rename dma_buf_move_notify() to dma_buf_invalidate_mappings() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Along with renaming the .move_notify() callback, rename the corresponding dma-buf core function. This makes the expected behavior clear to exporters calling this function. Signed-off-by: Leon Romanovsky Reviewed-by: Christian König Link: https://lore.kernel.org/r/20260124-dmabuf-revoke-v5-2-f98fca917e96@nvidia.com Signed-off-by: Christian König --- drivers/dma-buf/dma-buf.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +- drivers/gpu/drm/xe/xe_bo.c | 2 +- drivers/iommu/iommufd/selftest.c | 2 +- drivers/vfio/pci/vfio_pci_dmabuf.c | 4 ++-- include/linux/dma-buf.h | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index cc9b88214d97..1c257607a623 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -981,7 +981,7 @@ dma_buf_pin_on_map(struct dma_buf_attachment *attach) * 3. 
Exporters must hold the dma-buf reservation lock when calling these * functions: * - * - dma_buf_move_notify() + * - dma_buf_invalidate_mappings() */ /** @@ -1323,14 +1323,14 @@ void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach, EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF"); /** - * dma_buf_move_notify - notify attachments that DMA-buf is moving + * dma_buf_invalidate_mappings - notify attachments that DMA-buf is moving * * @dmabuf: [in] buffer which is moving * * Informs all attachments that they need to destroy and recreate all their * mappings. */ -void dma_buf_move_notify(struct dma_buf *dmabuf) +void dma_buf_invalidate_mappings(struct dma_buf *dmabuf) { struct dma_buf_attachment *attach; @@ -1340,7 +1340,7 @@ void dma_buf_move_notify(struct dma_buf *dmabuf) if (attach->importer_ops) attach->importer_ops->invalidate_mappings(attach); } -EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF"); +EXPORT_SYMBOL_NS_GPL(dma_buf_invalidate_mappings, "DMA_BUF"); /** * DOC: cpu access diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index e08f58de4b17..f73dc99d1887 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1270,7 +1270,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, if (abo->tbo.base.dma_buf && !drm_gem_is_imported(&abo->tbo.base) && old_mem && old_mem->mem_type != TTM_PL_SYSTEM) - dma_buf_move_notify(abo->tbo.base.dma_buf); + dma_buf_invalidate_mappings(abo->tbo.base.dma_buf); /* move_notify is called before move happens */ trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1, diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index b0bd31d14bb9..94712b05edff 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -819,7 +819,7 @@ static int xe_bo_move_notify(struct xe_bo *bo, /* Don't call move_notify() for imported dma-bufs. 
*/ if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach) - dma_buf_move_notify(ttm_bo->base.dma_buf); + dma_buf_invalidate_mappings(ttm_bo->base.dma_buf); /* * TTM has already nuked the mmap for us (see ttm_bo_unmap_virtual), diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index c4322fd26f93..fd47953db4a3 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -2073,7 +2073,7 @@ static int iommufd_test_dmabuf_revoke(struct iommufd_ucmd *ucmd, int fd, priv = dmabuf->priv; dma_resv_lock(dmabuf->resv, NULL); priv->revoked = revoked; - dma_buf_move_notify(dmabuf); + dma_buf_invalidate_mappings(dmabuf); dma_resv_unlock(dmabuf->resv); err_put: diff --git a/drivers/vfio/pci/vfio_pci_dmabuf.c b/drivers/vfio/pci/vfio_pci_dmabuf.c index d4d0f7d08c53..362e3d149817 100644 --- a/drivers/vfio/pci/vfio_pci_dmabuf.c +++ b/drivers/vfio/pci/vfio_pci_dmabuf.c @@ -320,7 +320,7 @@ void vfio_pci_dma_buf_move(struct vfio_pci_core_device *vdev, bool revoked) if (priv->revoked != revoked) { dma_resv_lock(priv->dmabuf->resv, NULL); priv->revoked = revoked; - dma_buf_move_notify(priv->dmabuf); + dma_buf_invalidate_mappings(priv->dmabuf); dma_resv_unlock(priv->dmabuf->resv); } fput(priv->dmabuf->file); @@ -341,7 +341,7 @@ void vfio_pci_dma_buf_cleanup(struct vfio_pci_core_device *vdev) list_del_init(&priv->dmabufs_elm); priv->vdev = NULL; priv->revoked = true; - dma_buf_move_notify(priv->dmabuf); + dma_buf_invalidate_mappings(priv->dmabuf); dma_resv_unlock(priv->dmabuf->resv); vfio_device_put_registration(&vdev->vdev); fput(priv->dmabuf->file); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index d9ee4499b37d..d0470af8887e 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -588,7 +588,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *, enum dma_data_direction); void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); -void 
dma_buf_move_notify(struct dma_buf *dma_buf); +void dma_buf_invalidate_mappings(struct dma_buf *dma_buf); int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir); int dma_buf_end_cpu_access(struct dma_buf *dma_buf, From 609fc8766d68da10697e1df0514d079d54bf48df Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Sat, 24 Jan 2026 21:14:15 +0200 Subject: [PATCH 010/158] dma-buf: Always build with DMABUF_MOVE_NOTIFY MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit DMABUF_MOVE_NOTIFY was introduced in 2018 and has been marked as experimental and disabled by default ever since. Six years later, all new importers implement this callback. It is therefore reasonable to drop CONFIG_DMABUF_MOVE_NOTIFY and always build DMABUF with support for it enabled. Suggested-by: Christian König Signed-off-by: Leon Romanovsky Reviewed-by: Christian König Link: https://lore.kernel.org/r/20260124-dmabuf-revoke-v5-3-f98fca917e96@nvidia.com Signed-off-by: Christian König --- drivers/dma-buf/Kconfig | 12 ------------ drivers/dma-buf/dma-buf.c | 3 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 10 +++------- drivers/gpu/drm/amd/amdkfd/Kconfig | 2 +- drivers/gpu/drm/xe/tests/xe_dma_buf.c | 3 +-- drivers/gpu/drm/xe/xe_dma_buf.c | 12 ++++-------- 6 files changed, 10 insertions(+), 32 deletions(-) diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index 012d22e941d6..8d4f2f89f24e 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -40,18 +40,6 @@ config UDMABUF A driver to let userspace turn memfd regions into dma-bufs. Qemu can use this to create host dmabufs for guest framebuffers. -config DMABUF_MOVE_NOTIFY - bool "Move notify between drivers (EXPERIMENTAL)" - default n - depends on DMA_SHARED_BUFFER - help - Don't pin buffers if the dynamic DMA-buf interface is available on - both the exporter as well as the importer. 
This fixes a security - problem where userspace is able to pin unrestricted amounts of memory - through DMA-buf. - This is marked experimental because we don't yet have a consistent - execution context and memory management between drivers. - config DMABUF_DEBUG bool "DMA-BUF debug checks" depends on DMA_SHARED_BUFFER diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 1c257607a623..1b301d96f968 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -916,8 +916,7 @@ static bool dma_buf_pin_on_map(struct dma_buf_attachment *attach) { return attach->dmabuf->ops->pin && - (!dma_buf_attachment_is_dynamic(attach) || - !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)); + !dma_buf_attachment_is_dynamic(attach); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 863454148b28..349215549e8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -145,13 +145,9 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach) * notifiers are disabled, only allow pinning in VRAM when move * notiers are enabled. 
*/ - if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) { - domains &= ~AMDGPU_GEM_DOMAIN_VRAM; - } else { - list_for_each_entry(attach, &dmabuf->attachments, node) - if (!attach->peer2peer) - domains &= ~AMDGPU_GEM_DOMAIN_VRAM; - } + list_for_each_entry(attach, &dmabuf->attachments, node) + if (!attach->peer2peer) + domains &= ~AMDGPU_GEM_DOMAIN_VRAM; if (domains & AMDGPU_GEM_DOMAIN_VRAM) bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index 16e12c9913f9..a5d7467c2f34 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -27,7 +27,7 @@ config HSA_AMD_SVM config HSA_AMD_P2P bool "HSA kernel driver support for peer-to-peer for AMD GPU devices" - depends on HSA_AMD && PCI_P2PDMA && DMABUF_MOVE_NOTIFY + depends on HSA_AMD && PCI_P2PDMA help Enable peer-to-peer (P2P) communication between AMD GPUs over the PCIe bus. This can improve performance of multi-GPU compute diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c index 1f2cca5c2f81..c107687ef3c0 100644 --- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c @@ -22,8 +22,7 @@ static bool p2p_enabled(struct dma_buf_test_params *params) static bool is_dynamic(struct dma_buf_test_params *params) { - return IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY) && params->attach_ops && - params->attach_ops->invalidate_mappings; + return params->attach_ops && params->attach_ops->invalidate_mappings; } static void check_residency(struct kunit *test, struct xe_bo *exported, diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c index 2e167b29d0c9..4e3a968bc6a0 100644 --- a/drivers/gpu/drm/xe/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/xe_dma_buf.c @@ -56,14 +56,10 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach) bool allow_vram = true; int ret; - if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) { - allow_vram = false; - } 
else { - list_for_each_entry(attach, &dmabuf->attachments, node) { - if (!attach->peer2peer) { - allow_vram = false; - break; - } + list_for_each_entry(attach, &dmabuf->attachments, node) { + if (!attach->peer2peer) { + allow_vram = false; + break; } } From bda4b9b8cc2f4850af52fb35abf8ead434ba38dd Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Mon, 5 Jan 2026 18:52:04 +0100 Subject: [PATCH 011/158] drm/rcar-du: dsi: Clean up VCLK divider calculation Currently, in rcar_mipi_dsi_parameters_calc(), the VCLK divider is stored in setup_info structure as BIT(divider). The rcar_mipi_dsi_parameters_calc() is called at the early beginning of rcar_mipi_dsi_startup() function. Later, in the same rcar_mipi_dsi_startup() function, the stored BIT(divider) value is passed to __ffs() to calculate back the divider out of the value again. Factor out VCLK divider calculation into rcar_mipi_dsi_vclk_divider() function and call the function from both rcar_mipi_dsi_parameters_calc() and rcar_mipi_dsi_startup() to avoid this back and forth BIT() and __ffs() and avoid unnecessarily storing the divider value in setup_info at all. This rework has a slight side-effect, in that it should allow the compiler to better evaluate the code and avoid compiler warnings about variable value overflows, which can never happen.
Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202512051834.bESvhDiG-lkp@intel.com/ Closes: https://lore.kernel.org/oe-kbuild-all/202512222321.TeY4VbvK-lkp@intel.com/ Signed-off-by: Marek Vasut Reviewed-by: Laurent Pinchart Link: https://patch.msgid.link/20260105175250.64309-1-marek.vasut+renesas@mailbox.org Signed-off-by: Tomi Valkeinen --- .../gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c | 35 ++++++++++++++----- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c index 4ef2e3c129ed..508977b9e892 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c @@ -84,7 +84,6 @@ struct dsi_setup_info { unsigned long fout; u16 m; u16 n; - u16 vclk_divider; const struct dsi_clk_config *clkset; }; @@ -335,10 +334,24 @@ rcar_mipi_dsi_post_init_phtw_v4h(struct rcar_mipi_dsi *dsi, * Hardware Setup */ +static unsigned int rcar_mipi_dsi_vclk_divider(struct rcar_mipi_dsi *dsi, + struct dsi_setup_info *setup_info) +{ + switch (dsi->info->model) { + case RCAR_DSI_V3U: + default: + return (setup_info->clkset->vco_cntrl >> 4) & 0x3; + + case RCAR_DSI_V4H: + return (setup_info->clkset->vco_cntrl >> 3) & 0x7; + } +} + static void rcar_mipi_dsi_pll_calc(struct rcar_mipi_dsi *dsi, unsigned long fin_rate, unsigned long fout_target, - struct dsi_setup_info *setup_info) + struct dsi_setup_info *setup_info, + u16 vclk_divider) { unsigned int best_err = -1; const struct rcar_mipi_dsi_device_info *info = dsi->info; @@ -360,7 +373,7 @@ static void rcar_mipi_dsi_pll_calc(struct rcar_mipi_dsi *dsi, if (fout < info->fout_min || fout > info->fout_max) continue; - fout = div64_u64(fout, setup_info->vclk_divider); + fout = div64_u64(fout, vclk_divider); if (fout < setup_info->clkset->min_freq || fout > setup_info->clkset->max_freq) @@ -390,7 +403,9 @@ static void rcar_mipi_dsi_parameters_calc(struct rcar_mipi_dsi 
*dsi, unsigned long fout_target; unsigned long fin_rate; unsigned int i; + unsigned int div; unsigned int err; + u16 vclk_divider; /* * Calculate Fout = dot clock * ColorDepth / (2 * Lane Count) @@ -412,18 +427,20 @@ static void rcar_mipi_dsi_parameters_calc(struct rcar_mipi_dsi *dsi, fin_rate = clk_get_rate(clk); + div = rcar_mipi_dsi_vclk_divider(dsi, setup_info); + switch (dsi->info->model) { case RCAR_DSI_V3U: default: - setup_info->vclk_divider = 1 << ((clk_cfg->vco_cntrl >> 4) & 0x3); + vclk_divider = BIT_U16(div); break; case RCAR_DSI_V4H: - setup_info->vclk_divider = 1 << (((clk_cfg->vco_cntrl >> 3) & 0x7) + 1); + vclk_divider = BIT_U16(div + 1); break; } - rcar_mipi_dsi_pll_calc(dsi, fin_rate, fout_target, setup_info); + rcar_mipi_dsi_pll_calc(dsi, fin_rate, fout_target, setup_info, vclk_divider); /* Find hsfreqrange */ setup_info->hsfreq = setup_info->fout * 2; @@ -439,7 +456,7 @@ static void rcar_mipi_dsi_parameters_calc(struct rcar_mipi_dsi *dsi, dev_dbg(dsi->dev, "Fout = %u * %lu / (%u * %u * %u) = %lu (target %lu Hz, error %d.%02u%%)\n", setup_info->m, fin_rate, dsi->info->n_mul, setup_info->n, - setup_info->vclk_divider, setup_info->fout, fout_target, + vclk_divider, setup_info->fout, fout_target, err / 100, err % 100); dev_dbg(dsi->dev, @@ -653,11 +670,11 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi, switch (dsi->info->model) { case RCAR_DSI_V3U: default: - vclkset |= VCLKSET_DIV_V3U(__ffs(setup_info.vclk_divider)); + vclkset |= VCLKSET_DIV_V3U(rcar_mipi_dsi_vclk_divider(dsi, &setup_info)); break; case RCAR_DSI_V4H: - vclkset |= VCLKSET_DIV_V4H(__ffs(setup_info.vclk_divider) - 1); + vclkset |= VCLKSET_DIV_V4H(rcar_mipi_dsi_vclk_divider(dsi, &setup_info)); break; } From 42dab3138176a944b09996441d837986f9ef13f8 Mon Sep 17 00:00:00 2001 From: "Mario Limonciello (AMD)" Date: Tue, 6 Jan 2026 11:00:16 -0600 Subject: [PATCH 012/158] drm/connector: Add a new 'panel_type' property If the driver can make an assertion whether a connected panel 
is an OLED panel or not then it can attach a property to the connector that userspace can use as a hint for color schemes. Reviewed-by: Leo Li Link: https://patch.msgid.link/20260106170017.68158-2-superm1@kernel.org Signed-off-by: Mario Limonciello (AMD) --- drivers/gpu/drm/drm_connector.c | 33 +++++++++++++++++++++++++++++++++ include/drm/drm_connector.h | 1 + include/drm/drm_mode_config.h | 4 ++++ include/uapi/drm/drm_mode.h | 4 ++++ 4 files changed, 42 insertions(+) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 4f5b27fab475..aec05adbc889 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1173,6 +1173,11 @@ static const struct drm_prop_enum_list drm_link_status_enum_list[] = { { DRM_MODE_LINK_STATUS_BAD, "Bad" }, }; +static const struct drm_prop_enum_list drm_panel_type_enum_list[] = { + { DRM_MODE_PANEL_TYPE_UNKNOWN, "unknown" }, + { DRM_MODE_PANEL_TYPE_OLED, "OLED" }, +}; + /** * drm_display_info_set_bus_formats - set the supported bus formats * @info: display info to store bus formats in @@ -1501,6 +1506,9 @@ EXPORT_SYMBOL(drm_hdmi_connector_get_output_format_name); * Summarizing: Only set "DPMS" when the connector is known to be enabled, * assume that a successful SETCONFIG call also sets "DPMS" to on, and * never read back the value of "DPMS" because it can be incorrect. + * panel_type: + * Immutable enum property to indicate the type of connected panel. + * Possible values are "unknown" (default) and "OLED". * PATH: * Connector path property to identify how this sink is physically * connected. Used by DP MST. 
This should be set by calling @@ -1851,6 +1859,13 @@ int drm_connector_create_standard_properties(struct drm_device *dev) return -ENOMEM; dev->mode_config.link_status_property = prop; + prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, "panel_type", + drm_panel_type_enum_list, + ARRAY_SIZE(drm_panel_type_enum_list)); + if (!prop) + return -ENOMEM; + dev->mode_config.panel_type_property = prop; + prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE, "non-desktop"); if (!prop) return -ENOMEM; @@ -3626,3 +3641,21 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, return tg; } EXPORT_SYMBOL(drm_mode_create_tile_group); + +/** + * drm_connector_attach_panel_type_property - attaches panel type property + * @connector: connector to attach the property on. + * + * This is used to add support for panel type detection. + */ +void drm_connector_attach_panel_type_property(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_property *prop = dev->mode_config.panel_type_property; + + if (!prop) + return; + + drm_object_attach_property(&connector->base, prop, DRM_MODE_PANEL_TYPE_UNKNOWN); +} +EXPORT_SYMBOL(drm_connector_attach_panel_type_property); diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 7eaec37ae1c7..c18be8c19de0 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -2493,6 +2493,7 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, u32 scaling_mode_mask); int drm_connector_attach_vrr_capable_property( struct drm_connector *connector); +void drm_connector_attach_panel_type_property(struct drm_connector *connector); int drm_connector_attach_broadcast_rgb_property(struct drm_connector *connector); int drm_connector_attach_colorspace_property(struct drm_connector *connector); int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *connector); diff --git a/include/drm/drm_mode_config.h 
b/include/drm/drm_mode_config.h index 895fb820dba0..5e1dd0cfccde 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -600,6 +600,10 @@ struct drm_mode_config { * multiple CRTCs. */ struct drm_property *tile_property; + /** + * @panel_type_property: Default connector property for panel type + */ + struct drm_property *panel_type_property; /** * @link_status_property: Default connector property for link status * of a connector diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index cbbbfc1dfe2b..3693d82b5279 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -166,6 +166,10 @@ extern "C" { #define DRM_MODE_LINK_STATUS_GOOD 0 #define DRM_MODE_LINK_STATUS_BAD 1 +/* Panel type property */ +#define DRM_MODE_PANEL_TYPE_UNKNOWN 0 +#define DRM_MODE_PANEL_TYPE_OLED 1 + /* * DRM_MODE_ROTATE_ * From 5d9d1247b2d5aa75ceccc9f9a915d4ed1109a205 Mon Sep 17 00:00:00 2001 From: "Mario Limonciello (AMD)" Date: Tue, 6 Jan 2026 11:00:17 -0600 Subject: [PATCH 013/158] drm/amd/display: Attach OLED property to eDP panels amdgpu verifies that a given panel is an OLED panel from extended caps and can provide accurate information to userspace. Attach a property to the DRM connector. 
Reviewed-by: Leo Li Link: https://patch.msgid.link/20260106170017.68158-3-superm1@kernel.org Signed-off-by: Mario Limonciello (AMD) --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 740711ac1037..7c51d8d7e73c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -91,6 +91,7 @@ #include #include #include +#include #include #include #include @@ -3737,6 +3738,10 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; caps->aux_support = false; + drm_object_property_set_value(&conn_base->base, + adev_to_drm(adev)->mode_config.panel_type_property, + caps->ext_caps->bits.oled ? DRM_MODE_PANEL_TYPE_OLED : DRM_MODE_PANEL_TYPE_UNKNOWN); + if (caps->ext_caps->bits.oled == 1 /* * || @@ -9017,6 +9022,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, if (connector_type == DRM_MODE_CONNECTOR_eDP) { struct drm_privacy_screen *privacy_screen; + drm_connector_attach_panel_type_property(&aconnector->base); + privacy_screen = drm_privacy_screen_get(adev_to_drm(adev)->dev, NULL); if (!IS_ERR(privacy_screen)) { drm_connector_attach_privacy_screen_provider(&aconnector->base, From 2ae03ce28679e45dc3c5cb7b93a8507edfe313b0 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Tue, 20 Jan 2026 20:19:29 -0300 Subject: [PATCH 014/158] dt-bindings: display: bridge: ldb: Document nxp, enable-termination-resistor Document the optional nxp,enable-termination-resistor property for the i.MX LVDS display bridge. This boolean property indicates that the built-in 100 Ohm termination resistor on the LVDS output is enabled. It is controlled via the HS_EN bit in the LVDS_CTRL register. 
Enabling the resistor can improve LVDS signal quality and may prevent visual artifacts on some boards, but increases the power consumption. Signed-off-by: Fabio Estevam Acked-by: Rob Herring (Arm) Signed-off-by: Liu Ying Link: https://lore.kernel.org/r/20260120231930.2782444-1-festevam@gmail.com --- .../bindings/display/bridge/fsl,ldb.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/Documentation/devicetree/bindings/display/bridge/fsl,ldb.yaml b/Documentation/devicetree/bindings/display/bridge/fsl,ldb.yaml index 07388bf2b90d..828b7a40e9b6 100644 --- a/Documentation/devicetree/bindings/display/bridge/fsl,ldb.yaml +++ b/Documentation/devicetree/bindings/display/bridge/fsl,ldb.yaml @@ -35,6 +35,15 @@ properties: - const: ldb - const: lvds + nxp,enable-termination-resistor: + type: boolean + description: + Indicates that the built-in 100 Ohm termination resistor on the LVDS + output is enabled. This property is optional and controlled via the + HS_EN bit in the LVDS_CTRL register. Enabling it can improve signal + quality and prevent visual artifacts on some boards, but increases + power consumption. + ports: $ref: /schemas/graph.yaml#/properties/ports @@ -74,6 +83,15 @@ allOf: properties: port@2: false + - if: + properties: + compatible: + contains: + const: fsl,imx6sx-ldb + then: + properties: + nxp,enable-termination-resistor: false + additionalProperties: false examples: From a50007089e078a1b7a826559a02277b1601ee189 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Tue, 20 Jan 2026 20:19:30 -0300 Subject: [PATCH 015/158] drm/bridge: fsl-ldb: Allow the termination resistor to be enabled The LVDS Control Register (LVDS_CTRL) register has an HS_EN bit that allows the 100 Ohm termination resistor in the chip to be enabled. Add support to setting the HS_EN bit when the optional property "nxp,enable-termination-resistor" is present. The motivation for introducing this property was a custom i.MX8MP board that was showing visual artifacts. 
After enabling the 100 Ohm termination resistor the LVDS signal quality improved causing the artifacts to disappear. Signed-off-by: Fabio Estevam Reviewed-by: Liu Ying Signed-off-by: Liu Ying Link: https://lore.kernel.org/r/20260120231930.2782444-2-festevam@gmail.com --- drivers/gpu/drm/bridge/fsl-ldb.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c index 5c3cf37200bc..7b71cde173e0 100644 --- a/drivers/gpu/drm/bridge/fsl-ldb.c +++ b/drivers/gpu/drm/bridge/fsl-ldb.c @@ -92,6 +92,7 @@ struct fsl_ldb { const struct fsl_ldb_devdata *devdata; bool ch0_enabled; bool ch1_enabled; + bool use_termination_resistor; }; static bool fsl_ldb_is_dual(const struct fsl_ldb *fsl_ldb) @@ -212,6 +213,9 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge, /* Program LVDS_CTRL */ reg = LVDS_CTRL_CC_ADJ(2) | LVDS_CTRL_PRE_EMPH_EN | LVDS_CTRL_PRE_EMPH_ADJ(3) | LVDS_CTRL_VBG_EN; + + if (fsl_ldb->use_termination_resistor) + reg |= LVDS_CTRL_HS_EN; regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->lvds_ctrl, reg); /* Wait for VBG to stabilize. */ @@ -340,6 +344,9 @@ static int fsl_ldb_probe(struct platform_device *pdev) if (IS_ERR(panel)) return PTR_ERR(panel); + if (of_property_present(dev->of_node, "nxp,enable-termination-resistor")) + fsl_ldb->use_termination_resistor = true; + fsl_ldb->panel_bridge = devm_drm_panel_bridge_add(dev, panel); if (IS_ERR(fsl_ldb->panel_bridge)) return PTR_ERR(fsl_ldb->panel_bridge); From 3aecd55af5b83d16d84e3c333d4163999ee8ff51 Mon Sep 17 00:00:00 2001 From: Caterina Shablia Date: Wed, 28 Jan 2026 18:40:57 +0000 Subject: [PATCH 016/158] drm: add ARM interleaved 64k modifier This modifier is primarily intended to be used by panvk to implement sparse partially-resident images with better map and unmap performance, and no worse access performance, compared to implementing them in terms of U-interleaved. 
With this modifier, the plane is divided into 64k byte 1:1 or 2:1 -sided tiles. The 64k tiles are laid out linearly. Each 64k tile is divided into blocks of 16x16 texel blocks each, which themselves are laid out linearly within a 64k tile. Then within each such 16x16 block, texel blocks are laid out according to U order, similar to 16X16_BLOCK_U_INTERLEAVED. Unlike 16X16_BLOCK_U_INTERLEAVED, the layout does not depend on whether a format is compressed or not. The hardware features corresponding to this modifier are available starting with v10 (second gen Valhall.) The corresponding panvk MR can be found at: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/38986 Previous version: https://lists.freedesktop.org/archives/dri-devel/2026-January/547072.html No changes since v2 Changes since v1: * Rewrite the description of the modifier to be hopefully unambiguous. Signed-off-by: Caterina Shablia Reviewed-by: Boris Brezillon Reviewed-by: Liviu Dudau Link: https://patch.msgid.link/20260128184058.807213-1-caterina.shablia@collabora.com Signed-off-by: Boris Brezillon --- include/uapi/drm/drm_fourcc.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index e527b24bd824..452f901513ad 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -1422,6 +1422,22 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) #define DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED \ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_MISC, 1ULL) +/* + * ARM 64k interleaved modifier + * + * This is used by ARM Mali v10+ GPUs. With this modifier, the plane is divided + * into 64k byte 1:1 or 2:1 -sided tiles. The 64k tiles are laid out linearly. + * Each 64k tile is divided into blocks of 16x16 texel blocks, which are + * themselves laid out linearly within a 64k tile. 
Then within each 16x16 + * block, texel blocks are laid out according to U order, similar to + * 16X16_BLOCK_U_INTERLEAVED. + * + * Note that unlike 16X16_BLOCK_U_INTERLEAVED, the layout does not change + * depending on whether a format is compressed or not. + */ +#define DRM_FORMAT_MOD_ARM_INTERLEAVED_64K \ + DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_MISC, 2ULL) + /* * Allwinner tiled modifier * From 96476ab8690290aa27084b12a481e48f3af7afb2 Mon Sep 17 00:00:00 2001 From: Luca Ceresoli Date: Wed, 7 Jan 2026 10:56:26 +0100 Subject: [PATCH 017/158] drm/bridge: imx8qxp-pixel-link: simplify logic to find next bridge imx8qxp_pixel_link_find_next_bridge() uses a sophisticated logic to find the preferred next bridge, using an array with two supporting index variables. This is more sophisticated than required because we only ever need a pointer to the "current" bridge and to the "best so far" bridge. Additionally this logic is going to make the addition of proper refcounting quite complex. Rewrite the logic using two drm_bridge pointers, which is by itself slightly simpler and is a preparation step for introducing bridge refcounting in a later commit. Also reword a comment to make it clearer. 
Reviewed-by: Liu Ying Link: https://patch.msgid.link/20260107-drm-bridge-alloc-getput-drm_of_find_bridge-v4-1-a62b4399a6bf@bootlin.com Signed-off-by: Luca Ceresoli --- .../gpu/drm/bridge/imx/imx8qxp-pixel-link.c | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c index 433c080197a2..4f84825fddca 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c @@ -261,12 +261,10 @@ imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl) { struct device_node *np = pl->dev->of_node; struct device_node *port, *remote; - struct drm_bridge *next_bridge[PL_MAX_NEXT_BRIDGES]; + struct drm_bridge *selected_bridge = NULL; u32 port_id; bool found_port = false; - int reg, ep_cnt = 0; - /* select the first next bridge by default */ - int bridge_sel = 0; + int reg; for (port_id = 1; port_id <= PL_MAX_MST_ADDR + 1; port_id++) { port = of_graph_get_port_by_id(np, port_id); @@ -300,24 +298,25 @@ imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl) continue; } - next_bridge[ep_cnt] = of_drm_find_bridge(remote); - if (!next_bridge[ep_cnt]) { + struct drm_bridge *next_bridge = of_drm_find_bridge(remote); + if (!next_bridge) { of_node_put(remote); return ERR_PTR(-EPROBE_DEFER); } - /* specially select the next bridge with companion PXL2DPI */ - if (of_property_present(remote, "fsl,companion-pxl2dpi")) - bridge_sel = ep_cnt; - - ep_cnt++; + /* + * Select the next bridge with companion PXL2DPI if + * present, otherwise default to the first bridge + */ + if (!selected_bridge || of_property_present(remote, "fsl,companion-pxl2dpi")) + selected_bridge = next_bridge; of_node_put(remote); } pl->mst_addr = port_id - 1; - return next_bridge[bridge_sel]; + return selected_bridge; } static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev) From 
42bb487369e56f8f07c82ac11fe771ba2b70cd68 Mon Sep 17 00:00:00 2001 From: Luca Ceresoli Date: Wed, 7 Jan 2026 10:56:27 +0100 Subject: [PATCH 018/158] drm/bridge: imx8qxp-pixel-link: simplify freeing of the remote device_node The main loop in imx8qxp_pixel_link_find_next_bridge() requires calling of_node_put() in multiple places, complicating code flow. Simplify it by using a cleanup action and making the 'remote' variable scope local to the loop. Reviewed-by: Maxime Ripard Reviewed-by: Liu Ying Link: https://patch.msgid.link/20260107-drm-bridge-alloc-getput-drm_of_find_bridge-v4-2-a62b4399a6bf@bootlin.com Signed-off-by: Luca Ceresoli --- drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c index 4f84825fddca..0c5ed06eee1b 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c @@ -260,7 +260,7 @@ static struct drm_bridge * imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl) { struct device_node *np = pl->dev->of_node; - struct device_node *port, *remote; + struct device_node *port; struct drm_bridge *selected_bridge = NULL; u32 port_id; bool found_port = false; @@ -286,7 +286,8 @@ imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl) } for (reg = 0; reg < PL_MAX_NEXT_BRIDGES; reg++) { - remote = of_graph_get_remote_node(np, port_id, reg); + struct device_node *remote __free(device_node) = + of_graph_get_remote_node(np, port_id, reg); if (!remote) continue; @@ -294,15 +295,12 @@ imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl) DRM_DEV_DEBUG(pl->dev, "port%u endpoint%u remote parent is not available\n", port_id, reg); - of_node_put(remote); continue; } struct drm_bridge *next_bridge = of_drm_find_bridge(remote); - if (!next_bridge) { - of_node_put(remote); + if (!next_bridge) return 
ERR_PTR(-EPROBE_DEFER); - } /* * Select the next bridge with companion PXL2DPI if @@ -310,8 +308,6 @@ imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl) */ if (!selected_bridge || of_property_present(remote, "fsl,companion-pxl2dpi")) selected_bridge = next_bridge; - - of_node_put(remote); } pl->mst_addr = port_id - 1; From 4eda1d5fe99db2a71da0156387b7b69e8e0dc32f Mon Sep 17 00:00:00 2001 From: Luca Ceresoli Date: Wed, 7 Jan 2026 10:56:28 +0100 Subject: [PATCH 019/158] drm/bridge: imx8qxp-pixel-link: imx8qxp_pixel_link_find_next_bridge: return int, not ERR_PTR In preparation for using bridge->next_bridge, we need to ensure that it will never contain anything but NULL or a valid bridge pointer. Current code stores an ERR_PTR when imx8qxp_pixel_link_find_next_bridge() errors out. Instead of fixing that after the facts in the caller, change the function to internally set pl->next_bridge and just return an int error value. No functional changes. Reviewed-by: Maxime Ripard Acked-by: Liu Ying Link: https://patch.msgid.link/20260107-drm-bridge-alloc-getput-drm_of_find_bridge-v4-3-a62b4399a6bf@bootlin.com Signed-off-by: Luca Ceresoli --- drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c index 0c5ed06eee1b..91e4f4d55469 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c @@ -256,8 +256,7 @@ static int imx8qxp_pixel_link_disable_all_controls(struct imx8qxp_pixel_link *pl return imx8qxp_pixel_link_disable_sync(pl); } -static struct drm_bridge * -imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl) +static int imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl) { struct device_node *np = pl->dev->of_node; struct device_node *port; @@ -282,7 +281,7 @@ imx8qxp_pixel_link_find_next_bridge(struct 
imx8qxp_pixel_link *pl) if (!found_port) { DRM_DEV_ERROR(pl->dev, "no available output port\n"); - return ERR_PTR(-ENODEV); + return -ENODEV; } for (reg = 0; reg < PL_MAX_NEXT_BRIDGES; reg++) { @@ -300,7 +299,7 @@ imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl) struct drm_bridge *next_bridge = of_drm_find_bridge(remote); if (!next_bridge) - return ERR_PTR(-EPROBE_DEFER); + return -EPROBE_DEFER; /* * Select the next bridge with companion PXL2DPI if @@ -311,8 +310,9 @@ imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl) } pl->mst_addr = port_id - 1; + pl->next_bridge = selected_bridge; - return selected_bridge; + return 0; } static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev) @@ -368,9 +368,9 @@ static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev) if (ret) return ret; - pl->next_bridge = imx8qxp_pixel_link_find_next_bridge(pl); - if (IS_ERR(pl->next_bridge)) - return PTR_ERR(pl->next_bridge); + ret = imx8qxp_pixel_link_find_next_bridge(pl); + if (ret) + return ret; platform_set_drvdata(pdev, pl); From e54c6ac22a8fda74401b2d8c4857728a1ffddb64 Mon Sep 17 00:00:00 2001 From: Cristian Ciocaltea Date: Thu, 29 Jan 2026 14:13:32 +0200 Subject: [PATCH 020/158] drm/bridge: dw-hdmi-qp: Provide HDMI Vendor Specific InfoFrame Since commit b626b1a1c9cc ("drm/bridge: refactor HDMI InfoFrame callbacks"), the following warning is generated: [ 13.654149] rockchip-drm display-subsystem: [drm] HDMI VSI not supported Add the missing support for sending HDMI Vendor-Specific Infoframes. Additionally, introduce dw_hdmi_qp_write_{pkt|infoframe}() helpers, as a prerequisite to rework all dw_hdmi_qp_bridge_write_*_infoframe() callbacks and get rid of some boilerplate code. 
Signed-off-by: Cristian Ciocaltea Reviewed-by: Maxime Ripard Reviewed-by: Daniel Stone Link: https://patch.msgid.link/20260129-dw-hdmi-qp-iframe-v2-1-0157ad05232c@collabora.com Signed-off-by: Daniel Stone --- drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c | 39 ++++++++++++++++++-- drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h | 2 + 2 files changed, 38 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c index 036316e2b60d..93aae1d8027d 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -969,9 +970,9 @@ static int dw_hdmi_qp_bridge_clear_avi_infoframe(struct drm_bridge *bridge) static int dw_hdmi_qp_bridge_clear_hdmi_infoframe(struct drm_bridge *bridge) { - /* FIXME: add support for this InfoFrame */ + struct dw_hdmi_qp *hdmi = bridge->driver_private; - drm_warn_once(bridge->encoder->dev, "HDMI VSI not supported\n"); + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_VSI_TX_EN, PKTSCHED_PKT_EN); return 0; } @@ -998,6 +999,32 @@ static int dw_hdmi_qp_bridge_clear_audio_infoframe(struct drm_bridge *bridge) return 0; } +static void dw_hdmi_qp_write_pkt(struct dw_hdmi_qp *hdmi, const u8 *buffer, + size_t start, size_t len, unsigned int reg) +{ + u32 val = 0; + size_t i; + + for (i = start; i < start + len; i++) + val |= buffer[i] << ((i % 4) * BITS_PER_BYTE); + + dw_hdmi_qp_write(hdmi, val, reg); +} + +static void dw_hdmi_qp_write_infoframe(struct dw_hdmi_qp *hdmi, const u8 *buffer, + size_t len, unsigned int reg) +{ + size_t i; + + /* InfoFrame packet header */ + dw_hdmi_qp_write_pkt(hdmi, buffer, 1, 2, reg); + + /* InfoFrame packet body */ + for (i = 0; i < len - 3; i += 4) + dw_hdmi_qp_write_pkt(hdmi, buffer + 3, i, min(len - i - 3, 4), + reg + i + 4); +} + static int dw_hdmi_qp_bridge_write_avi_infoframe(struct drm_bridge *bridge, const u8 *buffer, size_t len) { 
@@ -1011,9 +1038,15 @@ static int dw_hdmi_qp_bridge_write_avi_infoframe(struct drm_bridge *bridge, static int dw_hdmi_qp_bridge_write_hdmi_infoframe(struct drm_bridge *bridge, const u8 *buffer, size_t len) { + struct dw_hdmi_qp *hdmi = bridge->driver_private; + dw_hdmi_qp_bridge_clear_hdmi_infoframe(bridge); - /* FIXME: add support for the HDMI VSI */ + dw_hdmi_qp_write_infoframe(hdmi, buffer, len, PKT_VSI_CONTENTS0); + + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_VSI_FIELDRATE, PKTSCHED_PKT_CONFIG1); + dw_hdmi_qp_mod(hdmi, PKTSCHED_VSI_TX_EN, PKTSCHED_VSI_TX_EN, + PKTSCHED_PKT_EN); return 0; } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h index 91a15f82e32a..53688eae8dba 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h @@ -198,6 +198,7 @@ #define PKTSCHED_PRQUEUE2_CONFIG2 0xa94 #define PKTSCHED_PKT_CONFIG0 0xa98 #define PKTSCHED_PKT_CONFIG1 0xa9c +#define PKTSCHED_VSI_FIELDRATE BIT(14) #define PKTSCHED_DRMI_FIELDRATE BIT(13) #define PKTSCHED_AVI_FIELDRATE BIT(12) #define PKTSCHED_PKT_CONFIG2 0xaa0 @@ -206,6 +207,7 @@ #define PKTSCHED_DRMI_TX_EN BIT(17) #define PKTSCHED_AUDI_TX_EN BIT(15) #define PKTSCHED_AVI_TX_EN BIT(13) +#define PKTSCHED_VSI_TX_EN BIT(12) #define PKTSCHED_EMP_CVTEM_TX_EN BIT(10) #define PKTSCHED_AMD_TX_EN BIT(8) #define PKTSCHED_GCP_TX_EN BIT(3) From 1df09ea64b04920d574b4277ff194bdfa38569a7 Mon Sep 17 00:00:00 2001 From: Cristian Ciocaltea Date: Thu, 29 Jan 2026 14:13:33 +0200 Subject: [PATCH 021/158] drm/bridge: dw-hdmi-qp: Provide SPD InfoFrame The hardware is capable of sending Source Product Description (SPD) InfoFrames, hence enable the missing support. 
Signed-off-by: Cristian Ciocaltea Reviewed-by: Maxime Ripard Reviewed-by: Daniel Stone Link: https://patch.msgid.link/20260129-dw-hdmi-qp-iframe-v2-2-0157ad05232c@collabora.com Signed-off-by: Daniel Stone --- drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c | 29 +++++++++++++++++++- drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h | 1 + 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c index 93aae1d8027d..c006939a0f87 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c @@ -986,6 +986,15 @@ static int dw_hdmi_qp_bridge_clear_hdr_drm_infoframe(struct drm_bridge *bridge) return 0; } +static int dw_hdmi_qp_bridge_clear_spd_infoframe(struct drm_bridge *bridge) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_SPDI_TX_EN, PKTSCHED_PKT_EN); + + return 0; +} + static int dw_hdmi_qp_bridge_clear_audio_infoframe(struct drm_bridge *bridge) { struct dw_hdmi_qp *hdmi = bridge->driver_private; @@ -1061,6 +1070,21 @@ static int dw_hdmi_qp_bridge_write_hdr_drm_infoframe(struct drm_bridge *bridge, return dw_hdmi_qp_config_drm_infoframe(hdmi, buffer, len); } +static int dw_hdmi_qp_bridge_write_spd_infoframe(struct drm_bridge *bridge, + const u8 *buffer, size_t len) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + + dw_hdmi_qp_bridge_clear_spd_infoframe(bridge); + + dw_hdmi_qp_write_infoframe(hdmi, buffer, len, PKT_SPDI_CONTENTS0); + + dw_hdmi_qp_mod(hdmi, PKTSCHED_SPDI_TX_EN, PKTSCHED_SPDI_TX_EN, + PKTSCHED_PKT_EN); + + return 0; +} + static int dw_hdmi_qp_bridge_write_audio_infoframe(struct drm_bridge *bridge, const u8 *buffer, size_t len) { @@ -1259,6 +1283,8 @@ static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = { .hdmi_write_hdmi_infoframe = dw_hdmi_qp_bridge_write_hdmi_infoframe, .hdmi_clear_hdr_drm_infoframe = dw_hdmi_qp_bridge_clear_hdr_drm_infoframe, 
.hdmi_write_hdr_drm_infoframe = dw_hdmi_qp_bridge_write_hdr_drm_infoframe, + .hdmi_clear_spd_infoframe = dw_hdmi_qp_bridge_clear_spd_infoframe, + .hdmi_write_spd_infoframe = dw_hdmi_qp_bridge_write_spd_infoframe, .hdmi_clear_audio_infoframe = dw_hdmi_qp_bridge_clear_audio_infoframe, .hdmi_write_audio_infoframe = dw_hdmi_qp_bridge_write_audio_infoframe, .hdmi_audio_startup = dw_hdmi_qp_audio_enable, @@ -1375,7 +1401,8 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HDMI | DRM_BRIDGE_OP_HDMI_AUDIO | - DRM_BRIDGE_OP_HDMI_HDR_DRM_INFOFRAME; + DRM_BRIDGE_OP_HDMI_HDR_DRM_INFOFRAME | + DRM_BRIDGE_OP_HDMI_SPD_INFOFRAME; if (!hdmi->no_hpd) hdmi->bridge.ops |= DRM_BRIDGE_OP_HPD; hdmi->bridge.of_node = pdev->dev.of_node; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h index 53688eae8dba..c07847e8d7dd 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h @@ -206,6 +206,7 @@ #define PKTSCHED_PKT_EN 0xaa8 #define PKTSCHED_DRMI_TX_EN BIT(17) #define PKTSCHED_AUDI_TX_EN BIT(15) +#define PKTSCHED_SPDI_TX_EN BIT(14) #define PKTSCHED_AVI_TX_EN BIT(13) #define PKTSCHED_VSI_TX_EN BIT(12) #define PKTSCHED_EMP_CVTEM_TX_EN BIT(10) From 1933d1508a9d56fc08a1586ef244eb5516b271ac Mon Sep 17 00:00:00 2001 From: Cristian Ciocaltea Date: Thu, 29 Jan 2026 14:13:34 +0200 Subject: [PATCH 022/158] drm/bridge: dw-hdmi-qp: Rework AVI InfoFrame handler Make use of the recently introduced dw_hdmi_qp_write_infoframe() helper to simplify the writing of the Auxiliary Video InfoFrame (AVI) packet header and body registers. Moreover, since now having dedicated callbacks per InfoFrame type, move the implementation to dw_hdmi_qp_bridge_write_avi_infoframe() and drop dw_hdmi_qp_config_avi_infoframe(). While at it, also discard the superfluous infoframe size verification. 
Signed-off-by: Cristian Ciocaltea Reviewed-by: Maxime Ripard Reviewed-by: Daniel Stone Link: https://patch.msgid.link/20260129-dw-hdmi-qp-iframe-v2-3-0157ad05232c@collabora.com Signed-off-by: Daniel Stone --- drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c | 45 +++----------------- 1 file changed, 7 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c index c006939a0f87..abfe5641158f 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c @@ -748,43 +748,6 @@ static struct i2c_adapter *dw_hdmi_qp_i2c_adapter(struct dw_hdmi_qp *hdmi) return adap; } -static int dw_hdmi_qp_config_avi_infoframe(struct dw_hdmi_qp *hdmi, - const u8 *buffer, size_t len) -{ - u32 val, i, j; - - if (len != HDMI_INFOFRAME_SIZE(AVI)) { - dev_err(hdmi->dev, "failed to configure avi infoframe\n"); - return -EINVAL; - } - - /* - * DW HDMI QP IP uses a different byte format from standard AVI info - * frames, though generally the bits are in the correct bytes. 
- */ - val = buffer[1] << 8 | buffer[2] << 16; - dw_hdmi_qp_write(hdmi, val, PKT_AVI_CONTENTS0); - - for (i = 0; i < 4; i++) { - for (j = 0; j < 4; j++) { - if (i * 4 + j >= 14) - break; - if (!j) - val = buffer[i * 4 + j + 3]; - val |= buffer[i * 4 + j + 3] << (8 * j); - } - - dw_hdmi_qp_write(hdmi, val, PKT_AVI_CONTENTS1 + i * 4); - } - - dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_AVI_FIELDRATE, PKTSCHED_PKT_CONFIG1); - - dw_hdmi_qp_mod(hdmi, PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, - PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, PKTSCHED_PKT_EN); - - return 0; -} - static int dw_hdmi_qp_config_drm_infoframe(struct dw_hdmi_qp *hdmi, const u8 *buffer, size_t len) { @@ -1041,7 +1004,13 @@ static int dw_hdmi_qp_bridge_write_avi_infoframe(struct drm_bridge *bridge, dw_hdmi_qp_bridge_clear_avi_infoframe(bridge); - return dw_hdmi_qp_config_avi_infoframe(hdmi, buffer, len); + dw_hdmi_qp_write_infoframe(hdmi, buffer, len, PKT_AVI_CONTENTS0); + + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_AVI_FIELDRATE, PKTSCHED_PKT_CONFIG1); + dw_hdmi_qp_mod(hdmi, PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, + PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, PKTSCHED_PKT_EN); + + return 0; } static int dw_hdmi_qp_bridge_write_hdmi_infoframe(struct drm_bridge *bridge, From 08116cf29e36968bf57abc2e2f93b5ccf2903e52 Mon Sep 17 00:00:00 2001 From: Cristian Ciocaltea Date: Thu, 29 Jan 2026 14:13:35 +0200 Subject: [PATCH 023/158] drm/bridge: dw-hdmi-qp: Rework DRM InfoFrame handler Make use of the recently introduced dw_hdmi_qp_write_infoframe() helper to simplify the writing of the Dynamic Range and Mastering (DRM) InfoFrame packet header and body registers. Moreover, since now having dedicated callbacks per InfoFrame type, move the implementation to dw_hdmi_qp_bridge_write_hdr_drm_infoframe() and drop dw_hdmi_qp_config_drm_infoframe(). While at it, also discard the unnecessary infoframe size verification, as well as the redundant disabling of the packet transmission (already done by the explicit call to the clear callback). 
Signed-off-by: Cristian Ciocaltea Reviewed-by: Maxime Ripard Reviewed-by: Daniel Stone Link: https://patch.msgid.link/20260129-dw-hdmi-qp-iframe-v2-4-0157ad05232c@collabora.com Signed-off-by: Daniel Stone --- drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c | 40 ++++---------------- 1 file changed, 7 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c index abfe5641158f..33c990e198e9 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c @@ -748,38 +748,6 @@ static struct i2c_adapter *dw_hdmi_qp_i2c_adapter(struct dw_hdmi_qp *hdmi) return adap; } -static int dw_hdmi_qp_config_drm_infoframe(struct dw_hdmi_qp *hdmi, - const u8 *buffer, size_t len) -{ - u32 val, i; - - if (len != HDMI_INFOFRAME_SIZE(DRM)) { - dev_err(hdmi->dev, "failed to configure drm infoframe\n"); - return -EINVAL; - } - - dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN); - - val = buffer[1] << 8 | buffer[2] << 16; - dw_hdmi_qp_write(hdmi, val, PKT_DRMI_CONTENTS0); - - for (i = 0; i <= buffer[2]; i++) { - if (i % 4 == 0) - val = buffer[3 + i]; - val |= buffer[3 + i] << ((i % 4) * 8); - - if ((i % 4 == 3) || i == buffer[2]) - dw_hdmi_qp_write(hdmi, val, - PKT_DRMI_CONTENTS1 + ((i / 4) * 4)); - } - - dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_FIELDRATE, PKTSCHED_PKT_CONFIG1); - dw_hdmi_qp_mod(hdmi, PKTSCHED_DRMI_TX_EN, PKTSCHED_DRMI_TX_EN, - PKTSCHED_PKT_EN); - - return 0; -} - /* * Static values documented in the TRM * Different values are only used for debug purposes @@ -1036,7 +1004,13 @@ static int dw_hdmi_qp_bridge_write_hdr_drm_infoframe(struct drm_bridge *bridge, dw_hdmi_qp_bridge_clear_hdr_drm_infoframe(bridge); - return dw_hdmi_qp_config_drm_infoframe(hdmi, buffer, len); + dw_hdmi_qp_write_infoframe(hdmi, buffer, len, PKT_DRMI_CONTENTS0); + + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_FIELDRATE, PKTSCHED_PKT_CONFIG1); + dw_hdmi_qp_mod(hdmi, 
PKTSCHED_DRMI_TX_EN, PKTSCHED_DRMI_TX_EN, + PKTSCHED_PKT_EN); + + return 0; } static int dw_hdmi_qp_bridge_write_spd_infoframe(struct drm_bridge *bridge, From 3ea699b56d31c2a5140d9fac309ff5e0f2041411 Mon Sep 17 00:00:00 2001 From: Cristian Ciocaltea Date: Thu, 29 Jan 2026 14:13:36 +0200 Subject: [PATCH 024/158] drm/bridge: dw-hdmi-qp: Rework Audio InfoFrame handler Make use of the recently introduced dw_hdmi_qp_write_infoframe() helper to simplify the writing of the Audio InfoFrame packet header and body registers. Additionally, discard the redundant static values identifying the frame version and length. Moreover, since now having dedicated callbacks per InfoFrame type, move the implementation to dw_hdmi_qp_bridge_write_audio_infoframe() and drop dw_hdmi_qp_config_audio_infoframe(). Signed-off-by: Cristian Ciocaltea Reviewed-by: Maxime Ripard Reviewed-by: Daniel Stone Link: https://patch.msgid.link/20260129-dw-hdmi-qp-iframe-v2-5-0157ad05232c@collabora.com Signed-off-by: Daniel Stone --- drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c | 71 +++++++------------- 1 file changed, 25 insertions(+), 46 deletions(-) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c index 33c990e198e9..464ace1221d0 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c @@ -748,51 +748,6 @@ static struct i2c_adapter *dw_hdmi_qp_i2c_adapter(struct dw_hdmi_qp *hdmi) return adap; } -/* - * Static values documented in the TRM - * Different values are only used for debug purposes - */ -#define DW_HDMI_QP_AUDIO_INFOFRAME_HB1 0x1 -#define DW_HDMI_QP_AUDIO_INFOFRAME_HB2 0xa - -static int dw_hdmi_qp_config_audio_infoframe(struct dw_hdmi_qp *hdmi, - const u8 *buffer, size_t len) -{ - /* - * AUDI_CONTENTS0: { RSV, HB2, HB1, RSV } - * AUDI_CONTENTS1: { PB3, PB2, PB1, PB0 } - * AUDI_CONTENTS2: { PB7, PB6, PB5, PB4 } - * - * PB0: CheckSum - * PB1: | CT3 | CT2 | CT1 | CT0 | F13 | CC2 | CC1 | CC0 | - 
* PB2: | F27 | F26 | F25 | SF2 | SF1 | SF0 | SS1 | SS0 | - * PB3: | F37 | F36 | F35 | F34 | F33 | F32 | F31 | F30 | - * PB4: | CA7 | CA6 | CA5 | CA4 | CA3 | CA2 | CA1 | CA0 | - * PB5: | DM_INH | LSV3 | LSV2 | LSV1 | LSV0 | F52 | F51 | F50 | - * PB6~PB10: Reserved - * - * AUDI_CONTENTS0 default value defined by HDMI specification, - * and shall only be changed for debug purposes. - */ - u32 header_bytes = (DW_HDMI_QP_AUDIO_INFOFRAME_HB1 << 8) | - (DW_HDMI_QP_AUDIO_INFOFRAME_HB2 << 16); - - regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS0, &header_bytes, 1); - regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS1, &buffer[3], 1); - regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS2, &buffer[4], 1); - - /* Enable ACR, AUDI, AMD */ - dw_hdmi_qp_mod(hdmi, - PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN, - PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN, - PKTSCHED_PKT_EN); - - /* Enable AUDS */ - dw_hdmi_qp_mod(hdmi, PKTSCHED_AUDS_TX_EN, PKTSCHED_AUDS_TX_EN, PKTSCHED_PKT_EN); - - return 0; -} - static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_atomic_state *state) { @@ -1035,7 +990,31 @@ static int dw_hdmi_qp_bridge_write_audio_infoframe(struct drm_bridge *bridge, dw_hdmi_qp_bridge_clear_audio_infoframe(bridge); - return dw_hdmi_qp_config_audio_infoframe(hdmi, buffer, len); + /* + * AUDI_CONTENTS0: { RSV, HB2, HB1, RSV } + * AUDI_CONTENTS1: { PB3, PB2, PB1, PB0 } + * AUDI_CONTENTS2: { PB7, PB6, PB5, PB4 } + * + * PB0: CheckSum + * PB1: | CT3 | CT2 | CT1 | CT0 | F13 | CC2 | CC1 | CC0 | + * PB2: | F27 | F26 | F25 | SF2 | SF1 | SF0 | SS1 | SS0 | + * PB3: | F37 | F36 | F35 | F34 | F33 | F32 | F31 | F30 | + * PB4: | CA7 | CA6 | CA5 | CA4 | CA3 | CA2 | CA1 | CA0 | + * PB5: | DM_INH | LSV3 | LSV2 | LSV1 | LSV0 | F52 | F51 | F50 | + * PB6~PB10: Reserved + */ + dw_hdmi_qp_write_infoframe(hdmi, buffer, len, PKT_AUDI_CONTENTS0); + + /* Enable ACR, AUDI, AMD */ + dw_hdmi_qp_mod(hdmi, + PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | 
PKTSCHED_AMD_TX_EN, + PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN, + PKTSCHED_PKT_EN); + + /* Enable AUDS */ + dw_hdmi_qp_mod(hdmi, PKTSCHED_AUDS_TX_EN, PKTSCHED_AUDS_TX_EN, PKTSCHED_PKT_EN); + + return 0; } #ifdef CONFIG_DRM_DW_HDMI_QP_CEC From 3d65e4c276b32c03450261d114e495fda03c8e97 Mon Sep 17 00:00:00 2001 From: Cong Yang Date: Fri, 30 Jan 2026 11:04:56 +0800 Subject: [PATCH 025/158] drm/panel-edp: Add CMN N116BCL-EAK (C2) Add support for the CMN N116BCL-EAK (C2) panel, place the EDID here for subsequent reference. edid-decode (hex): 00 ff ff ff ff ff ff 00 0d ae 7a 11 00 00 00 00 08 22 01 04 95 1a 0e 78 03 46 a5 9c 5b 53 8b 24 1d 50 54 00 00 00 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 e6 1e 56 e2 50 00 3c 30 30 20 a6 00 00 90 10 00 00 1a 00 00 00 fd 00 28 3c 32 32 08 01 0a 20 20 20 20 20 20 00 00 00 fe 00 43 4d 4e 0a 20 20 20 20 20 20 20 20 20 00 00 00 fe 00 4e 31 31 36 42 43 4c 2d 45 41 4b 0a 20 01 80 70 20 79 02 00 25 01 09 fc 34 01 fc 34 01 28 3c 80 81 00 10 72 1a 00 00 03 01 28 3c 00 00 00 00 00 00 3c 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 2f 90 Signed-off-by: Cong Yang Reviewed-by: Douglas Anderson Signed-off-by: Douglas Anderson Link: https://patch.msgid.link/20260130030456.2080748-1-yangcong5@huaqin.corp-partner.google.com --- drivers/gpu/drm/panel/panel-edp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 108569490ed5..c9eacfffd5b2 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -2014,6 +2014,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('C', 'M', 'N', 0x1160, &delay_200_500_e80_d50, "N116BCJ-EAK"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1161, 
&delay_200_500_e80, "N116BCP-EA2"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1163, &delay_200_500_e80_d50, "N116BCJ-EAK"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x117a, &delay_200_500_e80_d50, "N116BCL-EAK"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x124c, &delay_200_500_e80_d50, "N122JCA-ENK"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"), From 0a5b0d095bcdb219348ed8ae1c97ee99fc4913b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= Date: Tue, 27 Jan 2026 08:56:59 -0300 Subject: [PATCH 026/158] drm/v3d: Replace IDR with XArray for perfmon tracking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The IDR interface is deprecated and the XArray API is the recommended replacement. Replace the per-file IDR used to track perfmons with an XArray. This allows us to remove the external mutex that protects the IDR. While here, introduce the v3d_perfmon_delete() helper to consolidate the perfmon cleanup logic used by both v3d_perfmon_close_file() and v3d_perfmon_destroy_ioctl(). 
Reviewed-by: Iago Toral Quiroga Reviewed-by: Tvrtko Ursulin Link: https://patch.msgid.link/20260127115822.64401-1-mcanal@igalia.com Signed-off-by: Maíra Canal --- drivers/gpu/drm/v3d/v3d_drv.h | 5 +-- drivers/gpu/drm/v3d/v3d_perfmon.c | 57 +++++++++++-------------------- 2 files changed, 20 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 99a39329bb85..314213c26710 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -220,10 +220,7 @@ v3d_has_csd(struct v3d_dev *v3d) struct v3d_file_priv { struct v3d_dev *v3d; - struct { - struct idr idr; - struct mutex lock; - } perfmon; + struct xarray perfmons; struct drm_sched_entity sched_entity[V3D_MAX_QUEUES]; diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c index 9a3fe5255874..c090fc30ba4b 100644 --- a/drivers/gpu/drm/v3d/v3d_perfmon.c +++ b/drivers/gpu/drm/v3d/v3d_perfmon.c @@ -6,9 +6,6 @@ #include "v3d_drv.h" #include "v3d_regs.h" -#define V3D_PERFMONID_MIN 1 -#define V3D_PERFMONID_MAX U32_MAX - static const struct v3d_perf_counter_desc v3d_v42_performance_counters[] = { {"FEP", "FEP-valid-primitives-no-rendered-pixels", "[FEP] Valid primitives that result in no rendered pixels, for all rendered tiles"}, {"FEP", "FEP-valid-primitives-rendered-pixels", "[FEP] Valid primitives for all rendered tiles (primitives may be counted in more than one tile)"}, @@ -290,24 +287,23 @@ struct v3d_perfmon *v3d_perfmon_find(struct v3d_file_priv *v3d_priv, int id) { struct v3d_perfmon *perfmon; - mutex_lock(&v3d_priv->perfmon.lock); - perfmon = idr_find(&v3d_priv->perfmon.idr, id); + xa_lock(&v3d_priv->perfmons); + perfmon = xa_load(&v3d_priv->perfmons, id); v3d_perfmon_get(perfmon); - mutex_unlock(&v3d_priv->perfmon.lock); + xa_unlock(&v3d_priv->perfmons); return perfmon; } void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv) { - mutex_init(&v3d_priv->perfmon.lock); - idr_init_base(&v3d_priv->perfmon.idr, 1); 
+ xa_init_flags(&v3d_priv->perfmons, XA_FLAGS_ALLOC1); } -static int v3d_perfmon_idr_del(int id, void *elem, void *data) +static void v3d_perfmon_delete(struct v3d_file_priv *v3d_priv, + struct v3d_perfmon *perfmon) { - struct v3d_perfmon *perfmon = elem; - struct v3d_dev *v3d = (struct v3d_dev *)data; + struct v3d_dev *v3d = v3d_priv->v3d; /* If the active perfmon is being destroyed, stop it first */ if (perfmon == v3d->active_perfmon) @@ -317,19 +313,17 @@ static int v3d_perfmon_idr_del(int id, void *elem, void *data) cmpxchg(&v3d->global_perfmon, perfmon, NULL); v3d_perfmon_put(perfmon); - - return 0; } void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv) { - struct v3d_dev *v3d = v3d_priv->v3d; + struct v3d_perfmon *perfmon; + unsigned long id; - mutex_lock(&v3d_priv->perfmon.lock); - idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, v3d); - idr_destroy(&v3d_priv->perfmon.idr); - mutex_unlock(&v3d_priv->perfmon.lock); - mutex_destroy(&v3d_priv->perfmon.lock); + xa_for_each(&v3d_priv->perfmons, id, perfmon) + v3d_perfmon_delete(v3d_priv, perfmon); + + xa_destroy(&v3d_priv->perfmons); } int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data, @@ -341,6 +335,7 @@ int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data, struct v3d_perfmon *perfmon; unsigned int i; int ret; + u32 id; /* Number of monitored counters cannot exceed HW limits. 
*/ if (req->ncounters > DRM_V3D_MAX_PERF_COUNTERS || @@ -366,18 +361,15 @@ int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data, refcount_set(&perfmon->refcnt, 1); mutex_init(&perfmon->lock); - mutex_lock(&v3d_priv->perfmon.lock); - ret = idr_alloc(&v3d_priv->perfmon.idr, perfmon, V3D_PERFMONID_MIN, - V3D_PERFMONID_MAX, GFP_KERNEL); - mutex_unlock(&v3d_priv->perfmon.lock); - + ret = xa_alloc(&v3d_priv->perfmons, &id, perfmon, xa_limit_32b, + GFP_KERNEL); if (ret < 0) { mutex_destroy(&perfmon->lock); kfree(perfmon); return ret; } - req->id = ret; + req->id = id; return 0; } @@ -387,24 +379,13 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data, { struct v3d_file_priv *v3d_priv = file_priv->driver_priv; struct drm_v3d_perfmon_destroy *req = data; - struct v3d_dev *v3d = v3d_priv->v3d; struct v3d_perfmon *perfmon; - mutex_lock(&v3d_priv->perfmon.lock); - perfmon = idr_remove(&v3d_priv->perfmon.idr, req->id); - mutex_unlock(&v3d_priv->perfmon.lock); - + perfmon = xa_erase(&v3d_priv->perfmons, req->id); if (!perfmon) return -EINVAL; - /* If the active perfmon is being destroyed, stop it first */ - if (perfmon == v3d->active_perfmon) - v3d_perfmon_stop(v3d, perfmon, false); - - /* If the global perfmon is being destroyed, set it to NULL */ - cmpxchg(&v3d->global_perfmon, perfmon, NULL); - - v3d_perfmon_put(perfmon); + v3d_perfmon_delete(v3d_priv, perfmon); return 0; } From b1cc4172cc60b1039235c9dfc08c3c1ffa4fb863 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= Date: Tue, 27 Jan 2026 08:57:00 -0300 Subject: [PATCH 027/158] drm/vc4: Replace IDR with XArray for perfmon tracking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The IDR interface is deprecated and the XArray API is the recommended replacement. Replace the per-file IDR used to track perfmons with an XArray. This allows us to remove the external mutex that protects the IDR. 
While at it, introduce the vc4_perfmon_delete() helper to consolidate the perfmon cleanup logic used by both vc4_perfmon_close_file() and vc4_perfmon_destroy_ioctl(). Also, remove the redundant assignment of vc4file->dev to itself in vc4_perfmon_open_file(). Reviewed-by: Iago Toral Quiroga Reviewed-by: Tvrtko Ursulin Link: https://patch.msgid.link/20260127115822.64401-2-mcanal@igalia.com Signed-off-by: Maíra Canal --- drivers/gpu/drm/vc4/vc4_drv.h | 5 +-- drivers/gpu/drm/vc4/vc4_perfmon.c | 51 +++++++++++++------------------ 2 files changed, 22 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 221d8e01d539..dbcc83b7df00 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -791,10 +791,7 @@ struct vc4_exec_info { struct vc4_file { struct vc4_dev *dev; - struct { - struct idr idr; - struct mutex lock; - } perfmon; + struct xarray perfmons; bool bin_bo_used; }; diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c index 1ac80c0b258f..c02609b3ca47 100644 --- a/drivers/gpu/drm/vc4/vc4_perfmon.c +++ b/drivers/gpu/drm/vc4/vc4_perfmon.c @@ -14,9 +14,6 @@ #include "vc4_drv.h" #include "vc4_regs.h" -#define VC4_PERFMONID_MIN 1 -#define VC4_PERFMONID_MAX U32_MAX - void vc4_perfmon_get(struct vc4_perfmon *perfmon) { struct vc4_dev *vc4; @@ -95,10 +92,10 @@ struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id) if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return NULL; - mutex_lock(&vc4file->perfmon.lock); - perfmon = idr_find(&vc4file->perfmon.idr, id); + xa_lock(&vc4file->perfmons); + perfmon = xa_load(&vc4file->perfmons, id); vc4_perfmon_get(perfmon); - mutex_unlock(&vc4file->perfmon.lock); + xa_unlock(&vc4file->perfmons); return perfmon; } @@ -110,37 +107,34 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file) if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; - mutex_init(&vc4file->perfmon.lock); - idr_init_base(&vc4file->perfmon.idr, 
VC4_PERFMONID_MIN); - vc4file->dev = vc4; + xa_init_flags(&vc4file->perfmons, XA_FLAGS_ALLOC1); } -static int vc4_perfmon_idr_del(int id, void *elem, void *data) +static void vc4_perfmon_delete(struct vc4_file *vc4file, + struct vc4_perfmon *perfmon) { - struct vc4_perfmon *perfmon = elem; - struct vc4_dev *vc4 = (struct vc4_dev *)data; + struct vc4_dev *vc4 = vc4file->dev; /* If the active perfmon is being destroyed, stop it first */ if (perfmon == vc4->active_perfmon) vc4_perfmon_stop(vc4, perfmon, false); vc4_perfmon_put(perfmon); - - return 0; } void vc4_perfmon_close_file(struct vc4_file *vc4file) { struct vc4_dev *vc4 = vc4file->dev; + struct vc4_perfmon *perfmon; + unsigned long id; if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; - mutex_lock(&vc4file->perfmon.lock); - idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, vc4); - idr_destroy(&vc4file->perfmon.idr); - mutex_unlock(&vc4file->perfmon.lock); - mutex_destroy(&vc4file->perfmon.lock); + xa_for_each(&vc4file->perfmons, id, perfmon) + vc4_perfmon_delete(vc4file, perfmon); + + xa_destroy(&vc4file->perfmons); } int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data, @@ -152,6 +146,7 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data, struct vc4_perfmon *perfmon; unsigned int i; int ret; + u32 id; if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; @@ -185,17 +180,15 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data, refcount_set(&perfmon->refcnt, 1); - mutex_lock(&vc4file->perfmon.lock); - ret = idr_alloc(&vc4file->perfmon.idr, perfmon, VC4_PERFMONID_MIN, - VC4_PERFMONID_MAX, GFP_KERNEL); - mutex_unlock(&vc4file->perfmon.lock); - + ret = xa_alloc(&vc4file->perfmons, &id, perfmon, xa_limit_32b, + GFP_KERNEL); if (ret < 0) { kfree(perfmon); return ret; } - req->id = ret; + req->id = id; + return 0; } @@ -215,14 +208,12 @@ int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data, return -ENODEV; } - mutex_lock(&vc4file->perfmon.lock); - perfmon = 
idr_remove(&vc4file->perfmon.idr, req->id); - mutex_unlock(&vc4file->perfmon.lock); - + perfmon = xa_erase(&vc4file->perfmons, req->id); if (!perfmon) return -EINVAL; - vc4_perfmon_put(perfmon); + vc4_perfmon_delete(vc4file, perfmon); + return 0; } From 3c55330aac69150a1a4ff20684b41e7f66ad5fe9 Mon Sep 17 00:00:00 2001 From: Caio Ishikawa Date: Sat, 24 Jan 2026 12:51:13 +0000 Subject: [PATCH 028/158] drm/panel: boe-th101mb31ig002: Remove use of deprecated mipi_dsi_dcs_nop() Replace calls to deprecated mipi_dsi_dcs_nop() with mipi_dsi_dcs_nop_multi(). No intended functional changes. Signed-off-by: Caio Ishikawa Reviewed-by: Douglas Anderson Signed-off-by: Douglas Anderson Link: https://patch.msgid.link/20260124124959.196051-3-caio.ishikawa@proton.me --- drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c index f33d4f855929..01b4458e55ad 100644 --- a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c +++ b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c @@ -221,6 +221,7 @@ static int boe_th101mb31ig002_prepare(struct drm_panel *panel) struct boe_th101mb31ig002, panel); struct device *dev = &ctx->dsi->dev; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi }; int ret; ret = regulator_enable(ctx->power); @@ -233,9 +234,9 @@ static int boe_th101mb31ig002_prepare(struct drm_panel *panel) msleep(ctx->desc->vcioo_to_lp11_delay_ms); if (ctx->desc->lp11_before_reset) { - ret = mipi_dsi_dcs_nop(ctx->dsi); - if (ret) - return ret; + mipi_dsi_dcs_nop_multi(&dsi_ctx); + if (dsi_ctx.accum_err) + return dsi_ctx.accum_err; } if (ctx->desc->lp11_to_reset_delay_ms) From e2242223b714ac9fd8233546ca74ce972024111f Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 23 Jan 2026 19:22:20 +0000 Subject: [PATCH 029/158] dt-bindings: display: panel: Move FriendlyElec HD702E to eDP The "E" alludes 
to the fact that FriendlyElec's HD702E is actually an eDP panel - move its compatible to the appropriate binding doc. Cc: devicetree@vger.kernel.org Acked-by: Rob Herring (Arm) Signed-off-by: Robin Murphy Reviewed-by: Douglas Anderson Signed-off-by: Douglas Anderson Link: https://patch.msgid.link/c7f6f75d55a4801eab63a0dc81d14ae27866aca9.1769191673.git.robin.murphy@arm.com --- .../devicetree/bindings/display/panel/panel-edp-legacy.yaml | 2 ++ .../devicetree/bindings/display/panel/panel-simple.yaml | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/display/panel/panel-edp-legacy.yaml b/Documentation/devicetree/bindings/display/panel/panel-edp-legacy.yaml index b308047c1edf..afe7dc54ebf4 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-edp-legacy.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-edp-legacy.yaml @@ -44,6 +44,8 @@ properties: - boe,nv133fhm-n62 # BOE NV140FHM-N49 14.0" FHD a-Si FT panel - boe,nv140fhmn49 + # FriendlyELEC HD702E 800x1280 LCD panel + - friendlyarm,hd702e # Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel - innolux,n116bca-ea1 # Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml index 868edb04989a..106ae91ff474 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml @@ -144,8 +144,6 @@ properties: - foxlink,fl500wvr00-a0t # Frida FRD350H54004 3.5" QVGA TFT LCD panel - frida,frd350h54004 - # FriendlyELEC HD702E 800x1280 LCD panel - - friendlyarm,hd702e # GiantPlus GPG48273QS5 4.3" (480x272) WQVGA TFT LCD panel - giantplus,gpg48273qs5 # GiantPlus GPM940B0 3.0" QVGA TFT LCD panel From 5c323ea948ff0f9e50418119cbbb3d94f2bd6a96 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 23 Jan 2026 19:22:21 +0000 
Subject: [PATCH 030/158] drm/panel-edp: Move FriendlyELEC HD702E FriendlyELEC's HD702E module is an eDP panel (in as much as it's some LVDS LCD behind a Chrontel CH7511B eDP bridge), so move its data over to the eDP driver, also resolving the warning about the missing bpc value in the process. The unfortunate combination of HPD not being wired up and the RK3399 eDP controller's behaviour seems to result in the EDID not being readable over DP-AUX without probing the panel first, thus the hard-coded mode is still needed to get things going. Reviewed-by: Neil Armstrong Signed-off-by: Robin Murphy Reviewed-by: Douglas Anderson Signed-off-by: Douglas Anderson Link: https://patch.msgid.link/a00a59dcef3693efb02a8ee942848fbeaeaf05ba.1769191673.git.robin.murphy@arm.com --- drivers/gpu/drm/panel/panel-edp.c | 26 ++++++++++++++++++++++++++ drivers/gpu/drm/panel/panel-simple.c | 25 ------------------------- 2 files changed, 26 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index c9eacfffd5b2..f5f0e2c505b6 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1256,6 +1256,29 @@ static const struct panel_desc boe_nv140fhmn49 = { }, }; +static const struct drm_display_mode friendlyarm_hd702e_mode = { + .clock = 67185, + .hdisplay = 800, + .hsync_start = 800 + 20, + .hsync_end = 800 + 20 + 24, + .htotal = 800 + 20 + 24 + 20, + .vdisplay = 1280, + .vsync_start = 1280 + 4, + .vsync_end = 1280 + 4 + 8, + .vtotal = 1280 + 4 + 8 + 4, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + +static const struct panel_desc friendlyarm_hd702e = { + .modes = &friendlyarm_hd702e_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 94, + .height = 151, + }, +}; + static const struct drm_display_mode innolux_n116bca_ea1_mode = { .clock = 76420, .hdisplay = 1366, @@ -1663,6 +1686,9 @@ static const struct of_device_id platform_of_match[] = { }, { .compatible = "boe,nv140fhmn49", 
.data = &boe_nv140fhmn49, + }, { + .compatible = "friendlyarm,hd702e", + .data = &friendlyarm_hd702e, }, { .compatible = "innolux,n116bca-ea1", .data = &innolux_n116bca_ea1, diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index c606e5932ca7..67efea702f46 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2359,28 +2359,6 @@ static const struct panel_desc frida_frd350h54004 = { .connector_type = DRM_MODE_CONNECTOR_DPI, }; -static const struct drm_display_mode friendlyarm_hd702e_mode = { - .clock = 67185, - .hdisplay = 800, - .hsync_start = 800 + 20, - .hsync_end = 800 + 20 + 24, - .htotal = 800 + 20 + 24 + 20, - .vdisplay = 1280, - .vsync_start = 1280 + 4, - .vsync_end = 1280 + 4 + 8, - .vtotal = 1280 + 4 + 8 + 4, - .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, -}; - -static const struct panel_desc friendlyarm_hd702e = { - .modes = &friendlyarm_hd702e_mode, - .num_modes = 1, - .size = { - .width = 94, - .height = 151, - }, -}; - static const struct drm_display_mode giantplus_gpg482739qs5_mode = { .clock = 9000, .hdisplay = 480, @@ -5286,9 +5264,6 @@ static const struct of_device_id platform_of_match[] = { }, { .compatible = "frida,frd350h54004", .data = &frida_frd350h54004, - }, { - .compatible = "friendlyarm,hd702e", - .data = &friendlyarm_hd702e, }, { .compatible = "giantplus,gpg482739qs5", .data = &giantplus_gpg482739qs5 From 3c05e956b85e70cbd5042e03f615b23ca4635385 Mon Sep 17 00:00:00 2001 From: Damon Ding Date: Wed, 17 Dec 2025 17:33:04 +0800 Subject: [PATCH 031/158] drm/display: bridge_connector: Ensure last bridge determines EDID/modes detection capabilities When multiple bridges are present, EDID detection capability (DRM_BRIDGE_OP_EDID) takes precedence over modes detection (DRM_BRIDGE_OP_MODES). 
To ensure the above two capabilities are determined by the last bridge in the chain, we handle three cases: Case 1: The later bridge declares only DRM_BRIDGE_OP_MODES - If the previous bridge declares DRM_BRIDGE_OP_EDID, set &drm_bridge_connector.bridge_edid to NULL and set &drm_bridge_connector.bridge_modes to the later bridge. - Ensure modes detection capability of the later bridge will not be ignored. Case 2: The later bridge declares only DRM_BRIDGE_OP_EDID - If the previous bridge declares DRM_BRIDGE_OP_MODES, set &drm_bridge_connector.bridge_modes to NULL and set &drm_bridge_connector.bridge_edid to the later bridge. - Although EDID detection capability has higher priority, this operation is for balance and makes sense. Case 3: the later bridge declares both of them - Assign later bridge as &drm_bridge_connector.bridge_edid and &drm_bridge_connector.bridge_modes to this bridge. - Just leave transfer of these two capabilities as before. Signed-off-by: Damon Ding Suggested-by: Dmitry Baryshkov Tested-by: Marek Szyprowski Reviewed-by: Luca Ceresoli Tested-by: Heiko Stuebner (on rk3588) Link: https://patch.msgid.link/20251217093321.3108939-2-damon.ding@rock-chips.com Signed-off-by: Luca Ceresoli --- drivers/gpu/drm/display/drm_bridge_connector.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c index ba8ff113cff1..d49ccf28d2b2 100644 --- a/drivers/gpu/drm/display/drm_bridge_connector.c +++ b/drivers/gpu/drm/display/drm_bridge_connector.c @@ -826,9 +826,19 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (!bridge->ycbcr_420_allowed) connector->ycbcr_420_allowed = false; - if (bridge->ops & DRM_BRIDGE_OP_EDID) { + /* + * Ensure the last bridge declares OP_EDID or OP_MODES or both. 
+ */ + if (bridge->ops & DRM_BRIDGE_OP_EDID || bridge->ops & DRM_BRIDGE_OP_MODES) { drm_bridge_put(bridge_connector->bridge_edid); - bridge_connector->bridge_edid = drm_bridge_get(bridge); + bridge_connector->bridge_edid = NULL; + drm_bridge_put(bridge_connector->bridge_modes); + bridge_connector->bridge_modes = NULL; + + if (bridge->ops & DRM_BRIDGE_OP_EDID) + bridge_connector->bridge_edid = drm_bridge_get(bridge); + if (bridge->ops & DRM_BRIDGE_OP_MODES) + bridge_connector->bridge_modes = drm_bridge_get(bridge); } if (bridge->ops & DRM_BRIDGE_OP_HPD) { drm_bridge_put(bridge_connector->bridge_hpd); @@ -838,10 +848,6 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, drm_bridge_put(bridge_connector->bridge_detect); bridge_connector->bridge_detect = drm_bridge_get(bridge); } - if (bridge->ops & DRM_BRIDGE_OP_MODES) { - drm_bridge_put(bridge_connector->bridge_modes); - bridge_connector->bridge_modes = drm_bridge_get(bridge); - } if (bridge->ops & DRM_BRIDGE_OP_HDMI) { if (bridge_connector->bridge_hdmi) return ERR_PTR(-EBUSY); From 10a2694fde68ba214eca7d24229094fc2427fddd Mon Sep 17 00:00:00 2001 From: Damon Ding Date: Wed, 17 Dec 2025 17:33:05 +0800 Subject: [PATCH 032/158] drm/bridge: analogix_dp: Formalize the struct analogix_dp_device Use the tab instead of the space for &analogix_dp_device.aux and &analogix_dp_device.force_hpd. 
Signed-off-by: Damon Ding Reviewed-by: Dmitry Baryshkov Tested-by: Marek Szyprowski Reviewed-by: Luca Ceresoli Tested-by: Heiko Stuebner (on rk3588) Link: https://patch.msgid.link/20251217093321.3108939-3-damon.ding@rock-chips.com Signed-off-by: Luca Ceresoli --- drivers/gpu/drm/bridge/analogix/analogix_dp_core.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h index b86e93f30ed6..91b215c6a0cf 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h @@ -156,7 +156,7 @@ struct analogix_dp_device { struct drm_device *drm_dev; struct drm_connector connector; struct drm_bridge bridge; - struct drm_dp_aux aux; + struct drm_dp_aux aux; struct clk *clock; unsigned int irq; void __iomem *reg_base; @@ -166,7 +166,7 @@ struct analogix_dp_device { struct phy *phy; int dpms_mode; struct gpio_desc *hpd_gpiod; - bool force_hpd; + bool force_hpd; bool fast_train_enable; bool psr_supported; From cff3f89ffbdd4b6c43a117c01aaf5b290ff80803 Mon Sep 17 00:00:00 2001 From: Damon Ding Date: Wed, 17 Dec 2025 17:33:06 +0800 Subject: [PATCH 033/158] drm/bridge: analogix_dp: Move &drm_bridge_funcs.mode_set to &drm_bridge_funcs.atomic_enable According to the include/drm/drm_bridge.h, the callback &drm_bridge_funcs.mode_set is deprecated and it should be better to include the mode setting in the &drm_bridge_funcs.atomic_enable instead. 
Signed-off-by: Damon Ding Reviewed-by: Dmitry Baryshkov Tested-by: Marek Szyprowski Reviewed-by: Luca Ceresoli Tested-by: Heiko Stuebner (on rk3588) Link: https://patch.msgid.link/20251217093321.3108939-4-damon.ding@rock-chips.com Signed-off-by: Luca Ceresoli --- .../drm/bridge/analogix/analogix_dp_core.c | 161 +++++++++--------- 1 file changed, 82 insertions(+), 79 deletions(-) diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index efe534977d12..b1687a4aa047 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1177,12 +1177,88 @@ static int analogix_dp_set_bridge(struct analogix_dp_device *dp) return ret; } +static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode) +{ + struct analogix_dp_device *dp = to_dp(bridge); + struct drm_display_info *display_info = &dp->connector.display_info; + struct video_info *video = &dp->video_info; + struct device_node *dp_node = dp->dev->of_node; + int vic; + + /* Input video interlaces & hsync pol & vsync pol */ + video->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); + video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); + video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC); + + /* Input video dynamic_range & colorimetry */ + vic = drm_match_cea_mode(mode); + if ((vic == 6) || (vic == 7) || (vic == 21) || (vic == 22) || + (vic == 2) || (vic == 3) || (vic == 17) || (vic == 18)) { + video->dynamic_range = CEA; + video->ycbcr_coeff = COLOR_YCBCR601; + } else if (vic) { + video->dynamic_range = CEA; + video->ycbcr_coeff = COLOR_YCBCR709; + } else { + video->dynamic_range = VESA; + video->ycbcr_coeff = COLOR_YCBCR709; + } + + /* Input vide bpc and color_formats */ + switch (display_info->bpc) { + case 12: + video->color_depth = COLOR_12; + break; + case 10: + video->color_depth = COLOR_10; + break; + case 8: + 
video->color_depth = COLOR_8; + break; + case 6: + video->color_depth = COLOR_6; + break; + default: + video->color_depth = COLOR_8; + break; + } + if (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR444) + video->color_space = COLOR_YCBCR444; + else if (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422) + video->color_space = COLOR_YCBCR422; + else + video->color_space = COLOR_RGB; + + /* + * NOTE: those property parsing code is used for providing backward + * compatibility for samsung platform. + * Due to we used the "of_property_read_u32" interfaces, when this + * property isn't present, the "video_info" can keep the original + * values and wouldn't be modified. + */ + of_property_read_u32(dp_node, "samsung,color-space", + &video->color_space); + of_property_read_u32(dp_node, "samsung,dynamic-range", + &video->dynamic_range); + of_property_read_u32(dp_node, "samsung,ycbcr-coeff", + &video->ycbcr_coeff); + of_property_read_u32(dp_node, "samsung,color-depth", + &video->color_depth); + if (of_property_read_bool(dp_node, "hsync-active-high")) + video->h_sync_polarity = true; + if (of_property_read_bool(dp_node, "vsync-active-high")) + video->v_sync_polarity = true; + if (of_property_read_bool(dp_node, "interlaced")) + video->interlaced = true; +} + static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_atomic_state *old_state) { struct analogix_dp_device *dp = to_dp(bridge); struct drm_crtc *crtc; - struct drm_crtc_state *old_crtc_state; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; int timeout_loop = 0; int ret; @@ -1190,6 +1266,11 @@ static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge, if (!crtc) return; + new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc); + if (!new_crtc_state) + return; + analogix_dp_bridge_mode_set(bridge, &new_crtc_state->adjusted_mode); + old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc); /* Not a full enable, just disable PSR and continue */ if 
(old_crtc_state && old_crtc_state->self_refresh_active) { @@ -1296,83 +1377,6 @@ static void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge, DRM_ERROR("Failed to enable psr (%d)\n", ret); } -static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge, - const struct drm_display_mode *orig_mode, - const struct drm_display_mode *mode) -{ - struct analogix_dp_device *dp = to_dp(bridge); - struct drm_display_info *display_info = &dp->connector.display_info; - struct video_info *video = &dp->video_info; - struct device_node *dp_node = dp->dev->of_node; - int vic; - - /* Input video interlaces & hsync pol & vsync pol */ - video->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); - video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); - video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC); - - /* Input video dynamic_range & colorimetry */ - vic = drm_match_cea_mode(mode); - if ((vic == 6) || (vic == 7) || (vic == 21) || (vic == 22) || - (vic == 2) || (vic == 3) || (vic == 17) || (vic == 18)) { - video->dynamic_range = CEA; - video->ycbcr_coeff = COLOR_YCBCR601; - } else if (vic) { - video->dynamic_range = CEA; - video->ycbcr_coeff = COLOR_YCBCR709; - } else { - video->dynamic_range = VESA; - video->ycbcr_coeff = COLOR_YCBCR709; - } - - /* Input vide bpc and color_formats */ - switch (display_info->bpc) { - case 12: - video->color_depth = COLOR_12; - break; - case 10: - video->color_depth = COLOR_10; - break; - case 8: - video->color_depth = COLOR_8; - break; - case 6: - video->color_depth = COLOR_6; - break; - default: - video->color_depth = COLOR_8; - break; - } - if (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR444) - video->color_space = COLOR_YCBCR444; - else if (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422) - video->color_space = COLOR_YCBCR422; - else - video->color_space = COLOR_RGB; - - /* - * NOTE: those property parsing code is used for providing backward - * compatibility for samsung platform. 
- * Due to we used the "of_property_read_u32" interfaces, when this - * property isn't present, the "video_info" can keep the original - * values and wouldn't be modified. - */ - of_property_read_u32(dp_node, "samsung,color-space", - &video->color_space); - of_property_read_u32(dp_node, "samsung,dynamic-range", - &video->dynamic_range); - of_property_read_u32(dp_node, "samsung,ycbcr-coeff", - &video->ycbcr_coeff); - of_property_read_u32(dp_node, "samsung,color-depth", - &video->color_depth); - if (of_property_read_bool(dp_node, "hsync-active-high")) - video->h_sync_polarity = true; - if (of_property_read_bool(dp_node, "vsync-active-high")) - video->v_sync_polarity = true; - if (of_property_read_bool(dp_node, "interlaced")) - video->interlaced = true; -} - static const struct drm_bridge_funcs analogix_dp_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, @@ -1381,7 +1385,6 @@ static const struct drm_bridge_funcs analogix_dp_bridge_funcs = { .atomic_enable = analogix_dp_bridge_atomic_enable, .atomic_disable = analogix_dp_bridge_atomic_disable, .atomic_post_disable = analogix_dp_bridge_atomic_post_disable, - .mode_set = analogix_dp_bridge_mode_set, .attach = analogix_dp_bridge_attach, }; From c131d78840d7487e41c3afdc52bb74fd3f8861ef Mon Sep 17 00:00:00 2001 From: Icenowy Zheng Date: Thu, 29 Jan 2026 10:39:15 +0800 Subject: [PATCH 034/158] dt-bindings: vendor-prefixes: add verisilicon VeriSilicon is a Silicon IP vendor, which is the current owner of Vivante series video-related IPs and Hantro series video codec IPs. Add a vendor prefix for this company. 
Signed-off-by: Icenowy Zheng Signed-off-by: Icenowy Zheng Acked-by: Rob Herring (Arm) Signed-off-by: Thomas Zimmermann Link: https://patch.msgid.link/20260129023922.1527729-2-zhengxingda@iscas.ac.cn --- Documentation/devicetree/bindings/vendor-prefixes.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index d459886e515a..fc7985f3a549 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -1747,6 +1747,8 @@ patternProperties: description: Variscite Ltd. "^vdl,.*": description: Van der Laan b.v. + "^verisilicon,.*": + description: VeriSilicon Microelectronics (Shanghai) Co., Ltd. "^vertexcom,.*": description: Vertexcom Technologies, Inc. "^via,.*": From 5f6965fa1e2ec8ac69e1d448d343a528dc60cdfb Mon Sep 17 00:00:00 2001 From: Icenowy Zheng Date: Thu, 29 Jan 2026 10:39:16 +0800 Subject: [PATCH 035/158] dt-bindings: display: add verisilicon,dc Verisilicon has a series of display controllers prefixed with DC and with self-identification facility like their GC series GPUs. Add a device tree binding for it. Depending on the specific DC model, it can have either one or two display outputs, and each display output could be set to DPI signal or "DP" signal (which seems to be some plain parallel bus to HDMI controllers). 
Signed-off-by: Icenowy Zheng Signed-off-by: Icenowy Zheng Reviewed-by: Rob Herring (Arm) Signed-off-by: Thomas Zimmermann Link: https://patch.msgid.link/20260129023922.1527729-3-zhengxingda@iscas.ac.cn --- .../bindings/display/verisilicon,dc.yaml | 122 ++++++++++++++++++ 1 file changed, 122 insertions(+) create mode 100644 Documentation/devicetree/bindings/display/verisilicon,dc.yaml diff --git a/Documentation/devicetree/bindings/display/verisilicon,dc.yaml b/Documentation/devicetree/bindings/display/verisilicon,dc.yaml new file mode 100644 index 000000000000..9dc35ab973f2 --- /dev/null +++ b/Documentation/devicetree/bindings/display/verisilicon,dc.yaml @@ -0,0 +1,122 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/verisilicon,dc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Verisilicon DC-series display controllers + +maintainers: + - Icenowy Zheng + +properties: + $nodename: + pattern: "^display@[0-9a-f]+$" + + compatible: + items: + - enum: + - thead,th1520-dc8200 + - const: verisilicon,dc # DC IPs have discoverable ID/revision registers + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + items: + - description: DC Core clock + - description: DMA AXI bus clock + - description: Configuration AHB bus clock + - description: Pixel clock of output 0 + - description: Pixel clock of output 1 + + clock-names: + items: + - const: core + - const: axi + - const: ahb + - const: pix0 + - const: pix1 + + resets: + items: + - description: DC Core reset + - description: DMA AXI bus reset + - description: Configuration AHB bus reset + + reset-names: + items: + - const: core + - const: axi + - const: ahb + + ports: + $ref: /schemas/graph.yaml#/properties/ports + + properties: + port@0: + $ref: /schemas/graph.yaml#/properties/port + description: The first output channel , endpoint 0 should be + used for DPI format output and endpoint 1 should be used + for DP format 
output. + + port@1: + $ref: /schemas/graph.yaml#/properties/port + description: The second output channel if the DC variant + supports. Follow the same endpoint addressing rule with + the first port. + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - ports + +additionalProperties: false + +examples: + - | + #include + #include + #include + soc { + #address-cells = <2>; + #size-cells = <2>; + + display@ffef600000 { + compatible = "thead,th1520-dc8200", "verisilicon,dc"; + reg = <0xff 0xef600000 0x0 0x100000>; + interrupts = <93 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk_vo CLK_DPU_CCLK>, + <&clk_vo CLK_DPU_ACLK>, + <&clk_vo CLK_DPU_HCLK>, + <&clk_vo CLK_DPU_PIXELCLK0>, + <&clk_vo CLK_DPU_PIXELCLK1>; + clock-names = "core", "axi", "ahb", "pix0", "pix1"; + resets = <&rst TH1520_RESET_ID_DPU_CORE>, + <&rst TH1520_RESET_ID_DPU_AXI>, + <&rst TH1520_RESET_ID_DPU_AHB>; + reset-names = "core", "axi", "ahb"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + dpu_out_dp1: endpoint@1 { + reg = <1>; + remote-endpoint = <&hdmi_in>; + }; + }; + }; + }; + }; From dbf21777caa8b8c88c12f7f036b01208fec0d55a Mon Sep 17 00:00:00 2001 From: Icenowy Zheng Date: Thu, 29 Jan 2026 10:39:17 +0800 Subject: [PATCH 036/158] drm: verisilicon: add a driver for Verisilicon display controllers This is a from-scratch driver targeting Verisilicon DC-series display controllers, which feature self-identification functionality like their GC-series GPUs. Only DC8200 is being supported now, and only the main framebuffer is set up (as the DRM primary plane). Support for more DC models and more features is my further targets. As the display controller is delivered to SoC vendors as a whole part, this driver does not use component framework and extra bridges inside a SoC is expected to be implemented as dedicated bridges (this driver properly supports bridge chaining). 
Signed-off-by: Icenowy Zheng Signed-off-by: Icenowy Zheng Tested-by: Han Gao Tested-by: Michal Wilczynski Reviewed-by: Thomas Zimmermann Signed-off-by: Thomas Zimmermann Link: https://patch.msgid.link/20260129023922.1527729-4-zhengxingda@iscas.ac.cn --- MAINTAINERS | 7 + drivers/gpu/drm/Kconfig | 1 + drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/verisilicon/Kconfig | 16 + drivers/gpu/drm/verisilicon/Makefile | 5 + drivers/gpu/drm/verisilicon/vs_bridge.c | 371 ++++++++++++++++++ drivers/gpu/drm/verisilicon/vs_bridge.h | 39 ++ drivers/gpu/drm/verisilicon/vs_bridge_regs.h | 54 +++ drivers/gpu/drm/verisilicon/vs_crtc.c | 191 +++++++++ drivers/gpu/drm/verisilicon/vs_crtc.h | 31 ++ drivers/gpu/drm/verisilicon/vs_crtc_regs.h | 60 +++ drivers/gpu/drm/verisilicon/vs_dc.c | 207 ++++++++++ drivers/gpu/drm/verisilicon/vs_dc.h | 38 ++ drivers/gpu/drm/verisilicon/vs_dc_top_regs.h | 27 ++ drivers/gpu/drm/verisilicon/vs_drm.c | 182 +++++++++ drivers/gpu/drm/verisilicon/vs_drm.h | 28 ++ drivers/gpu/drm/verisilicon/vs_hwdb.c | 150 +++++++ drivers/gpu/drm/verisilicon/vs_hwdb.h | 29 ++ drivers/gpu/drm/verisilicon/vs_plane.c | 124 ++++++ drivers/gpu/drm/verisilicon/vs_plane.h | 72 ++++ .../gpu/drm/verisilicon/vs_primary_plane.c | 173 ++++++++ .../drm/verisilicon/vs_primary_plane_regs.h | 53 +++ 22 files changed, 1859 insertions(+) create mode 100644 drivers/gpu/drm/verisilicon/Kconfig create mode 100644 drivers/gpu/drm/verisilicon/Makefile create mode 100644 drivers/gpu/drm/verisilicon/vs_bridge.c create mode 100644 drivers/gpu/drm/verisilicon/vs_bridge.h create mode 100644 drivers/gpu/drm/verisilicon/vs_bridge_regs.h create mode 100644 drivers/gpu/drm/verisilicon/vs_crtc.c create mode 100644 drivers/gpu/drm/verisilicon/vs_crtc.h create mode 100644 drivers/gpu/drm/verisilicon/vs_crtc_regs.h create mode 100644 drivers/gpu/drm/verisilicon/vs_dc.c create mode 100644 drivers/gpu/drm/verisilicon/vs_dc.h create mode 100644 drivers/gpu/drm/verisilicon/vs_dc_top_regs.h create mode 100644 
drivers/gpu/drm/verisilicon/vs_drm.c create mode 100644 drivers/gpu/drm/verisilicon/vs_drm.h create mode 100644 drivers/gpu/drm/verisilicon/vs_hwdb.c create mode 100644 drivers/gpu/drm/verisilicon/vs_hwdb.h create mode 100644 drivers/gpu/drm/verisilicon/vs_plane.c create mode 100644 drivers/gpu/drm/verisilicon/vs_plane.h create mode 100644 drivers/gpu/drm/verisilicon/vs_primary_plane.c create mode 100644 drivers/gpu/drm/verisilicon/vs_primary_plane_regs.h diff --git a/MAINTAINERS b/MAINTAINERS index 3b84ad595e22..bd04bf63e8b0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8668,6 +8668,13 @@ F: Documentation/devicetree/bindings/display/brcm,bcm2835-*.yaml F: drivers/gpu/drm/vc4/ F: include/uapi/drm/vc4_drm.h +DRM DRIVERS FOR VERISILICON DISPLAY CONTROLLER IP +M: Icenowy Zheng +L: dri-devel@lists.freedesktop.org +S: Maintained +F: Documentation/devicetree/bindings/display/verisilicon,dc.yaml +F: drivers/gpu/drm/verisilicon/ + DRM DRIVERS FOR VIVANTE GPU IP M: Lucas Stach R: Russell King diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index a33b90251530..bf5b919a0222 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -335,6 +335,7 @@ source "drivers/gpu/drm/udl/Kconfig" source "drivers/gpu/drm/v3d/Kconfig" source "drivers/gpu/drm/vboxvideo/Kconfig" source "drivers/gpu/drm/vc4/Kconfig" +source "drivers/gpu/drm/verisilicon/Kconfig" source "drivers/gpu/drm/vgem/Kconfig" source "drivers/gpu/drm/virtio/Kconfig" source "drivers/gpu/drm/vkms/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 0e1c668b46d2..f2dfa0ad0ab7 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -235,6 +235,7 @@ obj-y += solomon/ obj-$(CONFIG_DRM_SPRD) += sprd/ obj-$(CONFIG_DRM_LOONGSON) += loongson/ obj-$(CONFIG_DRM_POWERVR) += imagination/ +obj-$(CONFIG_DRM_VERISILICON_DC) += verisilicon/ # Ensure drm headers are self-contained and pass kernel-doc hdrtest-files := \ diff --git a/drivers/gpu/drm/verisilicon/Kconfig 
b/drivers/gpu/drm/verisilicon/Kconfig new file mode 100644 index 000000000000..7cce86ec8603 --- /dev/null +++ b/drivers/gpu/drm/verisilicon/Kconfig @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only +config DRM_VERISILICON_DC + tristate "DRM Support for Verisilicon DC-series display controllers" + depends on DRM && COMMON_CLK + depends on RISCV || COMPILE_TEST + select DRM_BRIDGE_CONNECTOR + select DRM_CLIENT_SELECTION + select DRM_DISPLAY_HELPER + select DRM_GEM_DMA_HELPER + select DRM_KMS_HELPER + select REGMAP_MMIO + select VIDEOMODE_HELPERS + help + Choose this option if you have a SoC with Verisilicon DC-series + display controllers. If M is selected, the module will be called + verisilicon-dc. diff --git a/drivers/gpu/drm/verisilicon/Makefile b/drivers/gpu/drm/verisilicon/Makefile new file mode 100644 index 000000000000..fd8d805fbcde --- /dev/null +++ b/drivers/gpu/drm/verisilicon/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only + +verisilicon-dc-objs := vs_bridge.o vs_crtc.o vs_dc.o vs_drm.o vs_hwdb.o vs_plane.o vs_primary_plane.o + +obj-$(CONFIG_DRM_VERISILICON_DC) += verisilicon-dc.o diff --git a/drivers/gpu/drm/verisilicon/vs_bridge.c b/drivers/gpu/drm/verisilicon/vs_bridge.c new file mode 100644 index 000000000000..2a0ad00a94d6 --- /dev/null +++ b/drivers/gpu/drm/verisilicon/vs_bridge.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Icenowy Zheng + */ + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vs_bridge.h" +#include "vs_bridge_regs.h" +#include "vs_crtc.h" +#include "vs_dc.h" + +static int vs_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) +{ + struct vs_bridge *vbridge = drm_bridge_to_vs_bridge(bridge); + + return drm_bridge_attach(encoder, vbridge->next_bridge, + bridge, flags); +} + +struct vsdc_dp_format { + u32 linux_fmt; + bool 
is_yuv; + u32 vsdc_fmt; +}; + +static struct vsdc_dp_format vsdc_dp_supported_fmts[] = { + /* default to RGB888 */ + { MEDIA_BUS_FMT_FIXED, false, VSDC_DISP_DP_CONFIG_FMT_RGB888 }, + { MEDIA_BUS_FMT_RGB888_1X24, false, VSDC_DISP_DP_CONFIG_FMT_RGB888 }, + { MEDIA_BUS_FMT_RGB565_1X16, false, VSDC_DISP_DP_CONFIG_FMT_RGB565 }, + { MEDIA_BUS_FMT_RGB666_1X18, false, VSDC_DISP_DP_CONFIG_FMT_RGB666 }, + { MEDIA_BUS_FMT_RGB101010_1X30, + false, VSDC_DISP_DP_CONFIG_FMT_RGB101010 }, + { MEDIA_BUS_FMT_UYVY8_1X16, true, VSDC_DISP_DP_CONFIG_YUV_FMT_UYVY8 }, + { MEDIA_BUS_FMT_UYVY10_1X20, true, VSDC_DISP_DP_CONFIG_YUV_FMT_UYVY10 }, + { MEDIA_BUS_FMT_YUV8_1X24, true, VSDC_DISP_DP_CONFIG_YUV_FMT_YUV8 }, + { MEDIA_BUS_FMT_YUV10_1X30, true, VSDC_DISP_DP_CONFIG_YUV_FMT_YUV10 }, + { MEDIA_BUS_FMT_UYYVYY8_0_5X24, + true, VSDC_DISP_DP_CONFIG_YUV_FMT_UYYVYY8 }, + { MEDIA_BUS_FMT_UYYVYY10_0_5X30, + true, VSDC_DISP_DP_CONFIG_YUV_FMT_UYYVYY10 }, +}; + +static u32 *vs_bridge_atomic_get_output_bus_fmts_dpi(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + unsigned int *num_output_fmts) +{ + u32 *output_fmts; + + *num_output_fmts = 2; + + output_fmts = kcalloc(*num_output_fmts, sizeof(*output_fmts), + GFP_KERNEL); + if (!output_fmts) + return NULL; + + /* TODO: support more DPI output formats */ + output_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; + output_fmts[1] = MEDIA_BUS_FMT_FIXED; + + return output_fmts; +} + +static u32 *vs_bridge_atomic_get_output_bus_fmts_dp(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + unsigned int *num_output_fmts) +{ + u32 *output_fmts; + unsigned int i; + + *num_output_fmts = ARRAY_SIZE(vsdc_dp_supported_fmts); + + output_fmts = kcalloc(*num_output_fmts, sizeof(*output_fmts), + GFP_KERNEL); + if (!output_fmts) + return NULL; + + for (i = 0; i < *num_output_fmts; i++) + 
output_fmts[i] = vsdc_dp_supported_fmts[i].linux_fmt; + + return output_fmts; +} + +static bool vs_bridge_out_dp_fmt_supported(u32 out_fmt) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(vsdc_dp_supported_fmts); i++) + if (vsdc_dp_supported_fmts[i].linux_fmt == out_fmt) + return true; + + return false; +} + +static u32 *vs_bridge_atomic_get_input_bus_fmts_dp(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + if (!vs_bridge_out_dp_fmt_supported(output_fmt)) { + *num_input_fmts = 0; + return NULL; + } + + return drm_atomic_helper_bridge_propagate_bus_fmt(bridge, bridge_state, + crtc_state, + conn_state, + output_fmt, + num_input_fmts); +} + +static int vs_bridge_atomic_check_dp(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + if (!vs_bridge_out_dp_fmt_supported(bridge_state->output_bus_cfg.format)) + return -EINVAL; + + return 0; +} + +static void vs_bridge_enable_common(struct vs_crtc *crtc, + struct drm_bridge_state *br_state) +{ + struct vs_dc *dc = crtc->dc; + unsigned int output = crtc->id; + + regmap_clear_bits(dc->regs, VSDC_DISP_PANEL_CONFIG(output), + VSDC_DISP_PANEL_CONFIG_DAT_POL); + regmap_assign_bits(dc->regs, VSDC_DISP_PANEL_CONFIG(output), + VSDC_DISP_PANEL_CONFIG_DE_POL, + br_state->output_bus_cfg.flags & + DRM_BUS_FLAG_DE_LOW); + regmap_assign_bits(dc->regs, VSDC_DISP_PANEL_CONFIG(output), + VSDC_DISP_PANEL_CONFIG_CLK_POL, + br_state->output_bus_cfg.flags & + DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE); + regmap_set_bits(dc->regs, VSDC_DISP_PANEL_CONFIG(output), + VSDC_DISP_PANEL_CONFIG_DE_EN | + VSDC_DISP_PANEL_CONFIG_DAT_EN | + VSDC_DISP_PANEL_CONFIG_CLK_EN); + regmap_set_bits(dc->regs, VSDC_DISP_PANEL_CONFIG(output), + VSDC_DISP_PANEL_CONFIG_RUNNING); + regmap_clear_bits(dc->regs, 
VSDC_DISP_PANEL_START, + VSDC_DISP_PANEL_START_MULTI_DISP_SYNC); + regmap_set_bits(dc->regs, VSDC_DISP_PANEL_START, + VSDC_DISP_PANEL_START_RUNNING(output)); + + regmap_set_bits(dc->regs, VSDC_DISP_PANEL_CONFIG_EX(crtc->id), + VSDC_DISP_PANEL_CONFIG_EX_COMMIT); +} + +static void vs_bridge_atomic_enable_dpi(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct vs_bridge *vbridge = drm_bridge_to_vs_bridge(bridge); + struct drm_bridge_state *br_state = + drm_atomic_get_new_bridge_state(state, bridge); + struct vs_crtc *crtc = vbridge->crtc; + struct vs_dc *dc = crtc->dc; + unsigned int output = crtc->id; + + regmap_clear_bits(dc->regs, VSDC_DISP_DP_CONFIG(output), + VSDC_DISP_DP_CONFIG_DP_EN); + regmap_write(dc->regs, VSDC_DISP_DPI_CONFIG(output), + VSDC_DISP_DPI_CONFIG_FMT_RGB888); + + vs_bridge_enable_common(crtc, br_state); +} + +static void vs_bridge_atomic_enable_dp(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct vs_bridge *vbridge = drm_bridge_to_vs_bridge(bridge); + struct drm_bridge_state *br_state = + drm_atomic_get_new_bridge_state(state, bridge); + struct vs_crtc *crtc = vbridge->crtc; + struct vs_dc *dc = crtc->dc; + unsigned int output = crtc->id; + u32 dp_fmt; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(vsdc_dp_supported_fmts); i++) { + if (vsdc_dp_supported_fmts[i].linux_fmt == + br_state->output_bus_cfg.format) + break; + } + if (WARN_ON_ONCE(i == ARRAY_SIZE(vsdc_dp_supported_fmts))) + return; + dp_fmt = vsdc_dp_supported_fmts[i].vsdc_fmt; + dp_fmt |= VSDC_DISP_DP_CONFIG_DP_EN; + regmap_write(dc->regs, VSDC_DISP_DP_CONFIG(output), dp_fmt); + regmap_assign_bits(dc->regs, VSDC_DISP_PANEL_CONFIG(output), + VSDC_DISP_PANEL_CONFIG_YUV, + vsdc_dp_supported_fmts[i].is_yuv); + + vs_bridge_enable_common(crtc, br_state); +} + +static void vs_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct vs_bridge *vbridge = drm_bridge_to_vs_bridge(bridge); + struct vs_crtc *crtc = 
vbridge->crtc; + struct vs_dc *dc = crtc->dc; + unsigned int output = crtc->id; + + regmap_clear_bits(dc->regs, VSDC_DISP_PANEL_START, + VSDC_DISP_PANEL_START_MULTI_DISP_SYNC | + VSDC_DISP_PANEL_START_RUNNING(output)); + regmap_clear_bits(dc->regs, VSDC_DISP_PANEL_CONFIG(output), + VSDC_DISP_PANEL_CONFIG_RUNNING); + + regmap_set_bits(dc->regs, VSDC_DISP_PANEL_CONFIG_EX(crtc->id), + VSDC_DISP_PANEL_CONFIG_EX_COMMIT); +} + +static const struct drm_bridge_funcs vs_dpi_bridge_funcs = { + .attach = vs_bridge_attach, + .atomic_enable = vs_bridge_atomic_enable_dpi, + .atomic_disable = vs_bridge_atomic_disable, + .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt, + .atomic_get_output_bus_fmts = vs_bridge_atomic_get_output_bus_fmts_dpi, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, +}; + +static const struct drm_bridge_funcs vs_dp_bridge_funcs = { + .attach = vs_bridge_attach, + .atomic_enable = vs_bridge_atomic_enable_dp, + .atomic_disable = vs_bridge_atomic_disable, + .atomic_check = vs_bridge_atomic_check_dp, + .atomic_get_input_bus_fmts = vs_bridge_atomic_get_input_bus_fmts_dp, + .atomic_get_output_bus_fmts = vs_bridge_atomic_get_output_bus_fmts_dp, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, +}; + +static int vs_bridge_detect_output_interface(struct device_node *of_node, + unsigned int output) +{ + int ret; + struct device_node *remote; + + remote = of_graph_get_remote_node(of_node, output, + VSDC_OUTPUT_INTERFACE_DPI); + if (remote) { + ret = VSDC_OUTPUT_INTERFACE_DPI; + } else { + remote = of_graph_get_remote_node(of_node, output, + VSDC_OUTPUT_INTERFACE_DP); + if (remote) + ret = VSDC_OUTPUT_INTERFACE_DP; + else + ret = -ENODEV; + } + + if (remote) + 
of_node_put(remote); + + return ret; +} + +struct vs_bridge *vs_bridge_init(struct drm_device *drm_dev, + struct vs_crtc *crtc) +{ + unsigned int output = crtc->id; + struct vs_bridge *bridge; + struct drm_bridge *next; + enum vs_bridge_output_interface intf; + const struct drm_bridge_funcs *bridge_funcs; + int ret, enctype; + + intf = vs_bridge_detect_output_interface(drm_dev->dev->of_node, + output); + if (intf == -ENODEV) { + drm_dbg(drm_dev, "Skipping output %u\n", output); + return NULL; + } + + next = devm_drm_of_get_bridge(drm_dev->dev, drm_dev->dev->of_node, + output, intf); + if (IS_ERR(next)) { + ret = PTR_ERR(next); + if (ret != -EPROBE_DEFER) + drm_err(drm_dev, + "Cannot get downstream bridge of output %u\n", + output); + return ERR_PTR(ret); + } + + if (intf == VSDC_OUTPUT_INTERFACE_DPI) + bridge_funcs = &vs_dpi_bridge_funcs; + else + bridge_funcs = &vs_dp_bridge_funcs; + + bridge = devm_drm_bridge_alloc(drm_dev->dev, struct vs_bridge, base, + bridge_funcs); + if (IS_ERR(bridge)) + return ERR_PTR(PTR_ERR(bridge)); + + bridge->crtc = crtc; + bridge->intf = intf; + bridge->next_bridge = next; + + if (intf == VSDC_OUTPUT_INTERFACE_DPI) + enctype = DRM_MODE_ENCODER_DPI; + else + enctype = DRM_MODE_ENCODER_NONE; + + bridge->enc = drmm_plain_encoder_alloc(drm_dev, NULL, enctype, NULL); + if (IS_ERR(bridge->enc)) { + drm_err(drm_dev, + "Cannot initialize encoder for output %u\n", output); + ret = PTR_ERR(bridge->enc); + return ERR_PTR(ret); + } + + bridge->enc->possible_crtcs = drm_crtc_mask(&crtc->base); + + ret = devm_drm_bridge_add(drm_dev->dev, &bridge->base); + if (ret) { + drm_err(drm_dev, + "Cannot add bridge for output %u\n", output); + return ERR_PTR(ret); + } + + ret = drm_bridge_attach(bridge->enc, &bridge->base, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) { + drm_err(drm_dev, + "Cannot attach bridge for output %u\n", output); + return ERR_PTR(ret); + } + + bridge->conn = drm_bridge_connector_init(drm_dev, bridge->enc); + if 
(IS_ERR(bridge->conn)) { + drm_err(drm_dev, + "Cannot create connector for output %u\n", output); + ret = PTR_ERR(bridge->conn); + return ERR_PTR(ret); + } + drm_connector_attach_encoder(bridge->conn, bridge->enc); + + return bridge; +} diff --git a/drivers/gpu/drm/verisilicon/vs_bridge.h b/drivers/gpu/drm/verisilicon/vs_bridge.h new file mode 100644 index 000000000000..70fee1749699 --- /dev/null +++ b/drivers/gpu/drm/verisilicon/vs_bridge.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 Icenowy Zheng + */ + +#ifndef _VS_BRIDGE_H_ +#define _VS_BRIDGE_H_ + +#include + +#include +#include +#include + +struct vs_crtc; + +enum vs_bridge_output_interface { + VSDC_OUTPUT_INTERFACE_DPI = 0, + VSDC_OUTPUT_INTERFACE_DP = 1 +}; + +struct vs_bridge { + struct drm_bridge base; + struct drm_encoder *enc; + struct drm_connector *conn; + + struct vs_crtc *crtc; + struct drm_bridge *next_bridge; + enum vs_bridge_output_interface intf; +}; + +static inline struct vs_bridge *drm_bridge_to_vs_bridge(struct drm_bridge *bridge) +{ + return container_of(bridge, struct vs_bridge, base); +} + +struct vs_bridge *vs_bridge_init(struct drm_device *drm_dev, + struct vs_crtc *crtc); +#endif /* _VS_BRIDGE_H_ */ diff --git a/drivers/gpu/drm/verisilicon/vs_bridge_regs.h b/drivers/gpu/drm/verisilicon/vs_bridge_regs.h new file mode 100644 index 000000000000..9eb30e4564be --- /dev/null +++ b/drivers/gpu/drm/verisilicon/vs_bridge_regs.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 Icenowy Zheng + * + * Based on vs_dc_hw.h, which is: + * Copyright (C) 2023 VeriSilicon Holdings Co., Ltd. 
+ */ + +#ifndef _VS_BRIDGE_REGS_H_ +#define _VS_BRIDGE_REGS_H_ + +#include + +#define VSDC_DISP_PANEL_CONFIG(n) (0x1418 + 0x4 * (n)) +#define VSDC_DISP_PANEL_CONFIG_DE_EN BIT(0) +#define VSDC_DISP_PANEL_CONFIG_DE_POL BIT(1) +#define VSDC_DISP_PANEL_CONFIG_DAT_EN BIT(4) +#define VSDC_DISP_PANEL_CONFIG_DAT_POL BIT(5) +#define VSDC_DISP_PANEL_CONFIG_CLK_EN BIT(8) +#define VSDC_DISP_PANEL_CONFIG_CLK_POL BIT(9) +#define VSDC_DISP_PANEL_CONFIG_RUNNING BIT(12) +#define VSDC_DISP_PANEL_CONFIG_GAMMA BIT(13) +#define VSDC_DISP_PANEL_CONFIG_YUV BIT(16) + +#define VSDC_DISP_DPI_CONFIG(n) (0x14B8 + 0x4 * (n)) +#define VSDC_DISP_DPI_CONFIG_FMT_MASK GENMASK(2, 0) +#define VSDC_DISP_DPI_CONFIG_FMT_RGB565 (0) +#define VSDC_DISP_DPI_CONFIG_FMT_RGB666 (3) +#define VSDC_DISP_DPI_CONFIG_FMT_RGB888 (5) +#define VSDC_DISP_DPI_CONFIG_FMT_RGB101010 (6) + +#define VSDC_DISP_PANEL_START 0x1CCC +#define VSDC_DISP_PANEL_START_RUNNING(n) BIT(n) +#define VSDC_DISP_PANEL_START_MULTI_DISP_SYNC BIT(3) + +#define VSDC_DISP_DP_CONFIG(n) (0x1CD0 + 0x4 * (n)) +#define VSDC_DISP_DP_CONFIG_DP_EN BIT(3) +#define VSDC_DISP_DP_CONFIG_FMT_MASK GENMASK(2, 0) +#define VSDC_DISP_DP_CONFIG_FMT_RGB565 (0) +#define VSDC_DISP_DP_CONFIG_FMT_RGB666 (1) +#define VSDC_DISP_DP_CONFIG_FMT_RGB888 (2) +#define VSDC_DISP_DP_CONFIG_FMT_RGB101010 (3) +#define VSDC_DISP_DP_CONFIG_YUV_FMT_MASK GENMASK(7, 4) +#define VSDC_DISP_DP_CONFIG_YUV_FMT_UYVY8 (2 << 4) +#define VSDC_DISP_DP_CONFIG_YUV_FMT_YUV8 (4 << 4) +#define VSDC_DISP_DP_CONFIG_YUV_FMT_UYVY10 (8 << 4) +#define VSDC_DISP_DP_CONFIG_YUV_FMT_YUV10 (10 << 4) +#define VSDC_DISP_DP_CONFIG_YUV_FMT_UYYVYY8 (12 << 4) +#define VSDC_DISP_DP_CONFIG_YUV_FMT_UYYVYY10 (13 << 4) + +#define VSDC_DISP_PANEL_CONFIG_EX(n) (0x2518 + 0x4 * (n)) +#define VSDC_DISP_PANEL_CONFIG_EX_COMMIT BIT(0) + +#endif /* _VS_BRIDGE_REGS_H_ */ diff --git a/drivers/gpu/drm/verisilicon/vs_crtc.c b/drivers/gpu/drm/verisilicon/vs_crtc.c new file mode 100644 index 000000000000..f49401713000 --- /dev/null +++ 
b/drivers/gpu/drm/verisilicon/vs_crtc.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Icenowy Zheng + */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "vs_crtc_regs.h" +#include "vs_crtc.h" +#include "vs_dc.h" +#include "vs_dc_top_regs.h" +#include "vs_drm.h" +#include "vs_plane.h" + +static void vs_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct vs_crtc *vcrtc = drm_crtc_to_vs_crtc(crtc); + struct vs_dc *dc = vcrtc->dc; + unsigned int output = vcrtc->id; + + drm_crtc_vblank_off(crtc); + + clk_disable_unprepare(dc->pix_clk[output]); +} + +static void vs_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct vs_crtc *vcrtc = drm_crtc_to_vs_crtc(crtc); + struct vs_dc *dc = vcrtc->dc; + unsigned int output = vcrtc->id; + + drm_WARN_ON(&dc->drm_dev->base, + clk_prepare_enable(dc->pix_clk[output])); + + drm_crtc_vblank_on(crtc); +} + +static void vs_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct vs_crtc *vcrtc = drm_crtc_to_vs_crtc(crtc); + struct vs_dc *dc = vcrtc->dc; + unsigned int output = vcrtc->id; + + regmap_write(dc->regs, VSDC_DISP_HSIZE(output), + VSDC_DISP_HSIZE_DISP(mode->hdisplay) | + VSDC_DISP_HSIZE_TOTAL(mode->htotal)); + regmap_write(dc->regs, VSDC_DISP_VSIZE(output), + VSDC_DISP_VSIZE_DISP(mode->vdisplay) | + VSDC_DISP_VSIZE_TOTAL(mode->vtotal)); + regmap_write(dc->regs, VSDC_DISP_HSYNC(output), + VSDC_DISP_HSYNC_START(mode->hsync_start) | + VSDC_DISP_HSYNC_END(mode->hsync_end) | + VSDC_DISP_HSYNC_EN); + if (!(mode->flags & DRM_MODE_FLAG_PHSYNC)) + regmap_set_bits(dc->regs, VSDC_DISP_HSYNC(output), + VSDC_DISP_HSYNC_POL); + regmap_write(dc->regs, VSDC_DISP_VSYNC(output), + VSDC_DISP_VSYNC_START(mode->vsync_start) | + VSDC_DISP_VSYNC_END(mode->vsync_end) | + VSDC_DISP_VSYNC_EN); + if (!(mode->flags & DRM_MODE_FLAG_PVSYNC)) 
+ regmap_set_bits(dc->regs, VSDC_DISP_VSYNC(output), + VSDC_DISP_VSYNC_POL); + + WARN_ON(clk_set_rate(dc->pix_clk[output], mode->crtc_clock * 1000)); +} + +static enum drm_mode_status +vs_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) +{ + struct vs_crtc *vcrtc = drm_crtc_to_vs_crtc(crtc); + struct vs_dc *dc = vcrtc->dc; + unsigned int output = vcrtc->id; + long rate; + + if (mode->htotal > VSDC_DISP_TIMING_VALUE_MAX) + return MODE_BAD_HVALUE; + if (mode->vtotal > VSDC_DISP_TIMING_VALUE_MAX) + return MODE_BAD_VVALUE; + + rate = clk_round_rate(dc->pix_clk[output], mode->clock * HZ_PER_KHZ); + if (rate <= 0) + return MODE_CLOCK_RANGE; + + return MODE_OK; +} + +static bool vs_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *m, + struct drm_display_mode *adjusted_mode) +{ + struct vs_crtc *vcrtc = drm_crtc_to_vs_crtc(crtc); + struct vs_dc *dc = vcrtc->dc; + unsigned int output = vcrtc->id; + long clk_rate; + + drm_mode_set_crtcinfo(adjusted_mode, 0); + + /* Feedback the pixel clock to crtc_clock */ + clk_rate = adjusted_mode->crtc_clock * HZ_PER_KHZ; + clk_rate = clk_round_rate(dc->pix_clk[output], clk_rate); + if (clk_rate <= 0) + return false; + + adjusted_mode->crtc_clock = clk_rate / HZ_PER_KHZ; + + return true; +} + +static const struct drm_crtc_helper_funcs vs_crtc_helper_funcs = { + .atomic_flush = drm_crtc_vblank_atomic_flush, + .atomic_enable = vs_crtc_atomic_enable, + .atomic_disable = vs_crtc_atomic_disable, + .mode_set_nofb = vs_crtc_mode_set_nofb, + .mode_valid = vs_crtc_mode_valid, + .mode_fixup = vs_crtc_mode_fixup, +}; + +static int vs_crtc_enable_vblank(struct drm_crtc *crtc) +{ + struct vs_crtc *vcrtc = drm_crtc_to_vs_crtc(crtc); + struct vs_dc *dc = vcrtc->dc; + + regmap_set_bits(dc->regs, VSDC_TOP_IRQ_EN, VSDC_TOP_IRQ_VSYNC(vcrtc->id)); + + return 0; +} + +static void vs_crtc_disable_vblank(struct drm_crtc *crtc) +{ + struct vs_crtc *vcrtc = drm_crtc_to_vs_crtc(crtc); + struct vs_dc *dc = 
vcrtc->dc; + + regmap_clear_bits(dc->regs, VSDC_TOP_IRQ_EN, VSDC_TOP_IRQ_VSYNC(vcrtc->id)); +} + +static const struct drm_crtc_funcs vs_crtc_funcs = { + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .set_config = drm_atomic_helper_set_config, + .enable_vblank = vs_crtc_enable_vblank, + .disable_vblank = vs_crtc_disable_vblank, +}; + +struct vs_crtc *vs_crtc_init(struct drm_device *drm_dev, struct vs_dc *dc, + unsigned int output) +{ + struct vs_crtc *vcrtc; + struct drm_plane *primary; + int ret; + + vcrtc = drmm_kzalloc(drm_dev, sizeof(*vcrtc), GFP_KERNEL); + if (!vcrtc) + return ERR_PTR(-ENOMEM); + vcrtc->dc = dc; + vcrtc->id = output; + + /* Create our primary plane */ + primary = vs_primary_plane_init(drm_dev, dc); + if (IS_ERR(primary)) { + drm_err(drm_dev, "Couldn't create the primary plane\n"); + return ERR_PTR(PTR_ERR(primary)); + } + + ret = drmm_crtc_init_with_planes(drm_dev, &vcrtc->base, + primary, + NULL, + &vs_crtc_funcs, + NULL); + if (ret) { + drm_err(drm_dev, "Couldn't initialize CRTC\n"); + return ERR_PTR(ret); + } + + drm_crtc_helper_add(&vcrtc->base, &vs_crtc_helper_funcs); + + return vcrtc; +} diff --git a/drivers/gpu/drm/verisilicon/vs_crtc.h b/drivers/gpu/drm/verisilicon/vs_crtc.h new file mode 100644 index 000000000000..b45580bd99b3 --- /dev/null +++ b/drivers/gpu/drm/verisilicon/vs_crtc.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 Icenowy Zheng + */ + +#ifndef _VS_CRTC_H_ +#define _VS_CRTC_H_ + +#include +#include + +#define VSDC_DISP_TIMING_VALUE_MAX BIT_MASK(15) + +struct vs_dc; + +struct vs_crtc { + struct drm_crtc base; + + struct vs_dc *dc; + unsigned int id; +}; + +static inline struct vs_crtc *drm_crtc_to_vs_crtc(struct drm_crtc *crtc) +{ + return container_of(crtc, struct vs_crtc, base); +} + +struct vs_crtc 
*vs_crtc_init(struct drm_device *drm_dev, struct vs_dc *dc, + unsigned int output); + +#endif /* _VS_CRTC_H_ */ diff --git a/drivers/gpu/drm/verisilicon/vs_crtc_regs.h b/drivers/gpu/drm/verisilicon/vs_crtc_regs.h new file mode 100644 index 000000000000..c7930e817635 --- /dev/null +++ b/drivers/gpu/drm/verisilicon/vs_crtc_regs.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 Icenowy Zheng + * + * Based on vs_dc_hw.h, which is: + * Copyright (C) 2023 VeriSilicon Holdings Co., Ltd. + */ + +#ifndef _VS_CRTC_REGS_H_ +#define _VS_CRTC_REGS_H_ + +#include + +#define VSDC_DISP_DITHER_CONFIG(n) (0x1410 + 0x4 * (n)) + +#define VSDC_DISP_DITHER_TABLE_LOW(n) (0x1420 + 0x4 * (n)) +#define VSDC_DISP_DITHER_TABLE_LOW_DEFAULT 0x7B48F3C0 + +#define VSDC_DISP_DITHER_TABLE_HIGH(n) (0x1428 + 0x4 * (n)) +#define VSDC_DISP_DITHER_TABLE_HIGH_DEFAULT 0x596AD1E2 + +#define VSDC_DISP_HSIZE(n) (0x1430 + 0x4 * (n)) +#define VSDC_DISP_HSIZE_DISP_MASK GENMASK(14, 0) +#define VSDC_DISP_HSIZE_DISP(v) ((v) << 0) +#define VSDC_DISP_HSIZE_TOTAL_MASK GENMASK(30, 16) +#define VSDC_DISP_HSIZE_TOTAL(v) ((v) << 16) + +#define VSDC_DISP_HSYNC(n) (0x1438 + 0x4 * (n)) +#define VSDC_DISP_HSYNC_START_MASK GENMASK(14, 0) +#define VSDC_DISP_HSYNC_START(v) ((v) << 0) +#define VSDC_DISP_HSYNC_END_MASK GENMASK(29, 15) +#define VSDC_DISP_HSYNC_END(v) ((v) << 15) +#define VSDC_DISP_HSYNC_EN BIT(30) +#define VSDC_DISP_HSYNC_POL BIT(31) + +#define VSDC_DISP_VSIZE(n) (0x1440 + 0x4 * (n)) +#define VSDC_DISP_VSIZE_DISP_MASK GENMASK(14, 0) +#define VSDC_DISP_VSIZE_DISP(v) ((v) << 0) +#define VSDC_DISP_VSIZE_TOTAL_MASK GENMASK(30, 16) +#define VSDC_DISP_VSIZE_TOTAL(v) ((v) << 16) + +#define VSDC_DISP_VSYNC(n) (0x1448 + 0x4 * (n)) +#define VSDC_DISP_VSYNC_START_MASK GENMASK(14, 0) +#define VSDC_DISP_VSYNC_START(v) ((v) << 0) +#define VSDC_DISP_VSYNC_END_MASK GENMASK(29, 15) +#define VSDC_DISP_VSYNC_END(v) ((v) << 15) +#define VSDC_DISP_VSYNC_EN BIT(30) +#define VSDC_DISP_VSYNC_POL 
BIT(31) + +#define VSDC_DISP_CURRENT_LOCATION(n) (0x1450 + 0x4 * (n)) + +#define VSDC_DISP_GAMMA_INDEX(n) (0x1458 + 0x4 * (n)) + +#define VSDC_DISP_GAMMA_DATA(n) (0x1460 + 0x4 * (n)) + +#define VSDC_DISP_IRQ_STA 0x147C + +#define VSDC_DISP_IRQ_EN 0x1480 + +#endif /* _VS_CRTC_REGS_H_ */ diff --git a/drivers/gpu/drm/verisilicon/vs_dc.c b/drivers/gpu/drm/verisilicon/vs_dc.c new file mode 100644 index 000000000000..ba1b3f261a3a --- /dev/null +++ b/drivers/gpu/drm/verisilicon/vs_dc.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Icenowy Zheng + */ + +#include +#include +#include +#include + +#include "vs_crtc.h" +#include "vs_dc.h" +#include "vs_dc_top_regs.h" +#include "vs_drm.h" +#include "vs_hwdb.h" + +static const struct regmap_config vs_dc_regmap_cfg = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = sizeof(u32), + /* VSDC_OVL_CONFIG_EX(1) */ + .max_register = 0x2544, +}; + +static const struct of_device_id vs_dc_driver_dt_match[] = { + { .compatible = "verisilicon,dc" }, + {}, +}; +MODULE_DEVICE_TABLE(of, vs_dc_driver_dt_match); + +static irqreturn_t vs_dc_irq_handler(int irq, void *private) +{ + struct vs_dc *dc = private; + u32 irqs; + + regmap_read(dc->regs, VSDC_TOP_IRQ_ACK, &irqs); + + vs_drm_handle_irq(dc, irqs); + + return IRQ_HANDLED; +} + +static int vs_dc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct vs_dc *dc; + void __iomem *regs; + unsigned int port_count, i; + /* pix0/pix1 */ + char pixclk_name[5]; + int irq, ret; + + if (!dev->of_node) { + dev_err(dev, "can't find DC devices\n"); + return -ENODEV; + } + + port_count = of_graph_get_port_count(dev->of_node); + if (!port_count) { + dev_err(dev, "can't find DC downstream ports\n"); + return -ENODEV; + } + if (port_count > VSDC_MAX_OUTPUTS) { + dev_err(dev, "too many DC downstream ports than possible\n"); + return -EINVAL; + } + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, 
"No suitable DMA available\n"); + return ret; + } + + dc = devm_kzalloc(dev, sizeof(*dc), GFP_KERNEL); + if (!dc) + return -ENOMEM; + + dc->rsts[0].id = "core"; + dc->rsts[1].id = "axi"; + dc->rsts[2].id = "ahb"; + + ret = devm_reset_control_bulk_get_optional_shared(dev, VSDC_RESET_COUNT, + dc->rsts); + if (ret) { + dev_err(dev, "can't get reset lines\n"); + return ret; + } + + dc->core_clk = devm_clk_get_enabled(dev, "core"); + if (IS_ERR(dc->core_clk)) { + dev_err(dev, "can't get core clock\n"); + return PTR_ERR(dc->core_clk); + } + + dc->axi_clk = devm_clk_get_enabled(dev, "axi"); + if (IS_ERR(dc->axi_clk)) { + dev_err(dev, "can't get axi clock\n"); + return PTR_ERR(dc->axi_clk); + } + + dc->ahb_clk = devm_clk_get_enabled(dev, "ahb"); + if (IS_ERR(dc->ahb_clk)) { + dev_err(dev, "can't get ahb clock\n"); + return PTR_ERR(dc->ahb_clk); + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "can't get irq\n"); + return irq; + } + + ret = reset_control_bulk_deassert(VSDC_RESET_COUNT, dc->rsts); + if (ret) { + dev_err(dev, "can't deassert reset lines\n"); + return ret; + } + + regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(regs)) { + dev_err(dev, "can't map registers"); + ret = PTR_ERR(regs); + goto err_rst_assert; + } + + dc->regs = devm_regmap_init_mmio(dev, regs, &vs_dc_regmap_cfg); + if (IS_ERR(dc->regs)) { + ret = PTR_ERR(dc->regs); + goto err_rst_assert; + } + + ret = vs_fill_chip_identity(dc->regs, &dc->identity); + if (ret) + goto err_rst_assert; + + dev_info(dev, "Found DC%x rev %x customer %x\n", dc->identity.model, + dc->identity.revision, dc->identity.customer_id); + + if (port_count > dc->identity.display_count) { + dev_err(dev, "too many downstream ports than HW capability\n"); + ret = -EINVAL; + goto err_rst_assert; + } + + for (i = 0; i < dc->identity.display_count; i++) { + snprintf(pixclk_name, sizeof(pixclk_name), "pix%u", i); + dc->pix_clk[i] = devm_clk_get(dev, pixclk_name); + if (IS_ERR(dc->pix_clk[i])) { + 
dev_err(dev, "can't get pixel clk %u\n", i); + ret = PTR_ERR(dc->pix_clk[i]); + goto err_rst_assert; + } + } + + ret = devm_request_irq(dev, irq, vs_dc_irq_handler, 0, + dev_name(dev), dc); + if (ret) { + dev_err(dev, "can't request irq\n"); + goto err_rst_assert; + } + + dev_set_drvdata(dev, dc); + + ret = vs_drm_initialize(dc, pdev); + if (ret) + goto err_rst_assert; + + return 0; + +err_rst_assert: + reset_control_bulk_assert(VSDC_RESET_COUNT, dc->rsts); + return ret; +} + +static void vs_dc_remove(struct platform_device *pdev) +{ + struct vs_dc *dc = dev_get_drvdata(&pdev->dev); + + vs_drm_finalize(dc); + + dev_set_drvdata(&pdev->dev, NULL); + + reset_control_bulk_assert(VSDC_RESET_COUNT, dc->rsts); +} + +static void vs_dc_shutdown(struct platform_device *pdev) +{ + struct vs_dc *dc = dev_get_drvdata(&pdev->dev); + + vs_drm_shutdown_handler(dc); +} + +struct platform_driver vs_dc_platform_driver = { + .probe = vs_dc_probe, + .remove = vs_dc_remove, + .shutdown = vs_dc_shutdown, + .driver = { + .name = "verisilicon-dc", + .of_match_table = vs_dc_driver_dt_match, + }, +}; + +module_platform_driver(vs_dc_platform_driver); + +MODULE_AUTHOR("Icenowy Zheng "); +MODULE_DESCRIPTION("Verisilicon display controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/verisilicon/vs_dc.h b/drivers/gpu/drm/verisilicon/vs_dc.h new file mode 100644 index 000000000000..ed1016f18758 --- /dev/null +++ b/drivers/gpu/drm/verisilicon/vs_dc.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 Icenowy Zheng + * + * Based on vs_dc_hw.h, which is: + * Copyright (C) 2023 VeriSilicon Holdings Co., Ltd. 
+ */ + +#ifndef _VS_DC_H_ +#define _VS_DC_H_ + +#include +#include +#include + +#include + +#include "vs_hwdb.h" + +#define VSDC_MAX_OUTPUTS 2 +#define VSDC_RESET_COUNT 3 + +struct vs_drm_dev; +struct vs_crtc; + +struct vs_dc { + struct regmap *regs; + struct clk *core_clk; + struct clk *axi_clk; + struct clk *ahb_clk; + struct clk *pix_clk[VSDC_MAX_OUTPUTS]; + struct reset_control_bulk_data rsts[VSDC_RESET_COUNT]; + + struct vs_drm_dev *drm_dev; + struct vs_chip_identity identity; +}; + +#endif /* _VS_DC_H_ */ diff --git a/drivers/gpu/drm/verisilicon/vs_dc_top_regs.h b/drivers/gpu/drm/verisilicon/vs_dc_top_regs.h new file mode 100644 index 000000000000..50509bbbff08 --- /dev/null +++ b/drivers/gpu/drm/verisilicon/vs_dc_top_regs.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 Icenowy Zheng + * + * Based on vs_dc_hw.h, which is: + * Copyright (C) 2023 VeriSilicon Holdings Co., Ltd. + */ + +#ifndef _VS_DC_TOP_H_ +#define _VS_DC_TOP_H_ + +#include + +#define VSDC_TOP_RST 0x0000 + +#define VSDC_TOP_IRQ_ACK 0x0010 +#define VSDC_TOP_IRQ_VSYNC(n) BIT(n) + +#define VSDC_TOP_IRQ_EN 0x0014 + +#define VSDC_TOP_CHIP_MODEL 0x0020 + +#define VSDC_TOP_CHIP_REV 0x0024 + +#define VSDC_TOP_CHIP_CUSTOMER_ID 0x0030 + +#endif /* _VS_DC_TOP_H_ */ diff --git a/drivers/gpu/drm/verisilicon/vs_drm.c b/drivers/gpu/drm/verisilicon/vs_drm.c new file mode 100644 index 000000000000..fd259d53f49f --- /dev/null +++ b/drivers/gpu/drm/verisilicon/vs_drm.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Icenowy Zheng + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vs_bridge.h" +#include "vs_crtc.h" +#include "vs_dc.h" +#include "vs_dc_top_regs.h" +#include "vs_drm.h" + +#define DRIVER_NAME "verisilicon" +#define DRIVER_DESC "Verisilicon DC-series display controller driver" 
+#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 + +static int vs_gem_dumb_create(struct drm_file *file_priv, + struct drm_device *drm, + struct drm_mode_create_dumb *args) +{ + int ret; + + /* The hardware wants 128B-aligned pitches for linear buffers. */ + ret = drm_mode_size_dumb(drm, args, 128, 0); + if (ret) + return ret; + + return drm_gem_dma_dumb_create_internal(file_priv, drm, args); +} + +DEFINE_DRM_GEM_FOPS(vs_drm_driver_fops); + +static const struct drm_driver vs_drm_driver = { + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, + .fops = &vs_drm_driver_fops, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + + /* GEM Operations */ + DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vs_gem_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, +}; + +static const struct drm_mode_config_funcs vs_mode_config_funcs = { + .fb_create = drm_gem_fb_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static struct drm_mode_config_helper_funcs vs_mode_config_helper_funcs = { + .atomic_commit_tail = drm_atomic_helper_commit_tail, +}; + +static void vs_mode_config_init(struct drm_device *drm) +{ + drm->mode_config.min_width = 0; + drm->mode_config.min_height = 0; + drm->mode_config.max_width = 8192; + drm->mode_config.max_height = 8192; + drm->mode_config.funcs = &vs_mode_config_funcs; + drm->mode_config.helper_private = &vs_mode_config_helper_funcs; +} + +int vs_drm_initialize(struct vs_dc *dc, struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct vs_drm_dev *vdrm; + struct drm_device *drm; + struct vs_crtc *crtc; + struct vs_bridge *bridge; + unsigned int i; + int ret; + + vdrm = devm_drm_dev_alloc(dev, &vs_drm_driver, struct vs_drm_dev, base); + if (IS_ERR(vdrm)) + return PTR_ERR(vdrm); + + drm = &vdrm->base; + vdrm->dc = dc; + dc->drm_dev = vdrm; + + ret = drmm_mode_config_init(drm); + if (ret) + return ret; + + /* Remove early framebuffers (ie. 
simple-framebuffer) */ + ret = aperture_remove_all_conflicting_devices(DRIVER_NAME); + if (ret) + return ret; + + for (i = 0; i < dc->identity.display_count; i++) { + crtc = vs_crtc_init(drm, dc, i); + if (IS_ERR(crtc)) + return PTR_ERR(crtc); + + bridge = vs_bridge_init(drm, crtc); + if (IS_ERR(bridge)) + return PTR_ERR(bridge); + + vdrm->crtcs[i] = crtc; + } + + ret = drm_vblank_init(drm, dc->identity.display_count); + if (ret) + return ret; + + vs_mode_config_init(drm); + + /* Enable connectors polling */ + drm_kms_helper_poll_init(drm); + + drm_mode_config_reset(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + goto err_fini_poll; + + drm_client_setup(drm, NULL); + + return 0; + +err_fini_poll: + drm_kms_helper_poll_fini(drm); + return ret; +} + +void vs_drm_finalize(struct vs_dc *dc) +{ + struct vs_drm_dev *vdrm = dc->drm_dev; + struct drm_device *drm = &vdrm->base; + + drm_dev_unregister(drm); + drm_kms_helper_poll_fini(drm); + drm_atomic_helper_shutdown(drm); + dc->drm_dev = NULL; +} + +void vs_drm_shutdown_handler(struct vs_dc *dc) +{ + struct vs_drm_dev *vdrm = dc->drm_dev; + + drm_atomic_helper_shutdown(&vdrm->base); +} + +void vs_drm_handle_irq(struct vs_dc *dc, u32 irqs) +{ + unsigned int i; + + for (i = 0; i < dc->identity.display_count; i++) { + if (irqs & VSDC_TOP_IRQ_VSYNC(i)) { + irqs &= ~VSDC_TOP_IRQ_VSYNC(i); + if (dc->drm_dev->crtcs[i]) + drm_crtc_handle_vblank(&dc->drm_dev->crtcs[i]->base); + } + } + + if (irqs) + drm_warn_once(&dc->drm_dev->base, + "Unknown Verisilicon DC interrupt 0x%x fired!\n", + irqs); +} diff --git a/drivers/gpu/drm/verisilicon/vs_drm.h b/drivers/gpu/drm/verisilicon/vs_drm.h new file mode 100644 index 000000000000..606338206a42 --- /dev/null +++ b/drivers/gpu/drm/verisilicon/vs_drm.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 Icenowy Zheng + */ + +#ifndef _VS_DRM_H_ +#define _VS_DRM_H_ + +#include +#include + +#include + +struct vs_dc; + +struct vs_drm_dev { + struct 
drm_device base; + + struct vs_dc *dc; + struct vs_crtc *crtcs[VSDC_MAX_OUTPUTS]; +}; + +int vs_drm_initialize(struct vs_dc *dc, struct platform_device *pdev); +void vs_drm_finalize(struct vs_dc *dc); +void vs_drm_shutdown_handler(struct vs_dc *dc); +void vs_drm_handle_irq(struct vs_dc *dc, u32 irqs); + +#endif /* _VS_DRM_H_ */ diff --git a/drivers/gpu/drm/verisilicon/vs_hwdb.c b/drivers/gpu/drm/verisilicon/vs_hwdb.c new file mode 100644 index 000000000000..09336af0900a --- /dev/null +++ b/drivers/gpu/drm/verisilicon/vs_hwdb.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Icenowy Zheng + */ + +#include + +#include + +#include "vs_dc_top_regs.h" +#include "vs_hwdb.h" + +static const u32 vs_formats_array_no_yuv444[] = { + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_BGRA1010102, + /* TODO: non-RGB formats */ +}; + +static const u32 vs_formats_array_with_yuv444[] = { + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_XRGB8888, + 
DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_BGRA1010102, + /* TODO: non-RGB formats */ +}; + +static const struct vs_formats vs_formats_no_yuv444 = { + .array = vs_formats_array_no_yuv444, + .num = ARRAY_SIZE(vs_formats_array_no_yuv444) +}; + +static const struct vs_formats vs_formats_with_yuv444 = { + .array = vs_formats_array_with_yuv444, + .num = ARRAY_SIZE(vs_formats_array_with_yuv444) +}; + +static struct vs_chip_identity vs_chip_identities[] = { + { + .model = 0x8200, + .revision = 0x5720, + .customer_id = ~0U, + + .display_count = 2, + .formats = &vs_formats_no_yuv444, + }, + { + .model = 0x8200, + .revision = 0x5721, + .customer_id = 0x30B, + + .display_count = 2, + .formats = &vs_formats_no_yuv444, + }, + { + .model = 0x8200, + .revision = 0x5720, + .customer_id = 0x310, + + .display_count = 2, + .formats = &vs_formats_with_yuv444, + }, + { + .model = 0x8200, + .revision = 0x5720, + .customer_id = 0x311, + + .display_count = 2, + .formats = &vs_formats_no_yuv444, + }, +}; + +int vs_fill_chip_identity(struct regmap *regs, + struct vs_chip_identity *ident) +{ + u32 model; + u32 revision; + u32 customer_id; + int i; + + regmap_read(regs, VSDC_TOP_CHIP_MODEL, &model); + regmap_read(regs, VSDC_TOP_CHIP_REV, &revision); + regmap_read(regs, VSDC_TOP_CHIP_CUSTOMER_ID, &customer_id); + + for (i = 0; i < ARRAY_SIZE(vs_chip_identities); i++) { + if (vs_chip_identities[i].model == model && + vs_chip_identities[i].revision == revision && + (vs_chip_identities[i].customer_id == customer_id || + vs_chip_identities[i].customer_id == ~0U)) { + memcpy(ident, &vs_chip_identities[i], sizeof(*ident)); + ident->customer_id = customer_id; + return 0; + } + } + + return -EINVAL; +} diff --git a/drivers/gpu/drm/verisilicon/vs_hwdb.h b/drivers/gpu/drm/verisilicon/vs_hwdb.h new 
file mode 100644 index 000000000000..92192e4fa086 --- /dev/null +++ b/drivers/gpu/drm/verisilicon/vs_hwdb.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 Icenowy Zheng + */ + +#ifndef _VS_HWDB_H_ +#define _VS_HWDB_H_ + +#include +#include + +struct vs_formats { + const u32 *array; + unsigned int num; +}; + +struct vs_chip_identity { + u32 model; + u32 revision; + u32 customer_id; + + u32 display_count; + const struct vs_formats *formats; +}; + +int vs_fill_chip_identity(struct regmap *regs, + struct vs_chip_identity *ident); + +#endif /* _VS_HWDB_H_ */ diff --git a/drivers/gpu/drm/verisilicon/vs_plane.c b/drivers/gpu/drm/verisilicon/vs_plane.c new file mode 100644 index 000000000000..2f3953e588a3 --- /dev/null +++ b/drivers/gpu/drm/verisilicon/vs_plane.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Icenowy Zheng + */ + +#include +#include + +#include +#include +#include + +#include "vs_plane.h" + +void drm_format_to_vs_format(u32 drm_format, struct vs_format *vs_format) +{ + switch (drm_format) { + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_BGRX4444: + vs_format->color = VSDC_COLOR_FORMAT_X4R4G4B4; + break; + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_BGRA4444: + vs_format->color = VSDC_COLOR_FORMAT_A4R4G4B4; + break; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_BGRX5551: + vs_format->color = VSDC_COLOR_FORMAT_X1R5G5B5; + break; + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_BGRA5551: + vs_format->color = VSDC_COLOR_FORMAT_A1R5G5B5; + break; + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + vs_format->color = VSDC_COLOR_FORMAT_R5G6B5; + break; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_BGRX8888: + 
vs_format->color = VSDC_COLOR_FORMAT_X8R8G8B8; + break; + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_BGRA8888: + vs_format->color = VSDC_COLOR_FORMAT_A8R8G8B8; + break; + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_BGRA1010102: + vs_format->color = VSDC_COLOR_FORMAT_A2R10G10B10; + break; + default: + pr_warn("Unexpected drm format!\n"); + } + + switch (drm_format) { + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_RGBA1010102: + vs_format->swizzle = VSDC_SWIZZLE_RGBA; + break; + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_BGR565: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_ABGR2101010: + vs_format->swizzle = VSDC_SWIZZLE_ABGR; + break; + case DRM_FORMAT_BGRX4444: + case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_BGRA1010102: + vs_format->swizzle = VSDC_SWIZZLE_BGRA; + break; + default: + /* N/A for YUV formats */ + vs_format->swizzle = VSDC_SWIZZLE_ARGB; + } + + /* N/A for non-YUV formats */ + vs_format->uv_swizzle = false; +} + +dma_addr_t vs_fb_get_dma_addr(struct drm_framebuffer *fb, + const struct drm_rect *src_rect) +{ + struct drm_gem_dma_object *gem; + dma_addr_t dma_addr; + + /* Get the physical address of the buffer in memory */ + gem = drm_fb_dma_get_gem_obj(fb, 0); + + /* Compute the start of the displayed memory */ + dma_addr = gem->dma_addr + fb->offsets[0]; + + /* Fixup framebuffer address for src coordinates */ + dma_addr += drm_format_info_min_pitch(fb->format, 0, + src_rect->x1 >> 16); + dma_addr += (src_rect->y1 >> 16) * fb->pitches[0]; + + return dma_addr; +} diff --git 
a/drivers/gpu/drm/verisilicon/vs_plane.h b/drivers/gpu/drm/verisilicon/vs_plane.h new file mode 100644 index 000000000000..41875ea3d66a --- /dev/null +++ b/drivers/gpu/drm/verisilicon/vs_plane.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 Icenowy Zheng + * + * Based on vs_dc_hw.h, which is: + * Copyright (C) 2023 VeriSilicon Holdings Co., Ltd. + */ + +#ifndef _VS_PLANE_H_ +#define _VS_PLANE_H_ + +#include + +#include +#include +#include +#include + +#define VSDC_MAKE_PLANE_SIZE(w, h) (((w) & 0x7fff) | (((h) & 0x7fff) << 15)) +#define VSDC_MAKE_PLANE_POS(x, y) (((x) & 0x7fff) | (((y) & 0x7fff) << 15)) + +struct vs_dc; + +enum vs_color_format { + VSDC_COLOR_FORMAT_X4R4G4B4, + VSDC_COLOR_FORMAT_A4R4G4B4, + VSDC_COLOR_FORMAT_X1R5G5B5, + VSDC_COLOR_FORMAT_A1R5G5B5, + VSDC_COLOR_FORMAT_R5G6B5, + VSDC_COLOR_FORMAT_X8R8G8B8, + VSDC_COLOR_FORMAT_A8R8G8B8, + VSDC_COLOR_FORMAT_YUY2, + VSDC_COLOR_FORMAT_UYVY, + VSDC_COLOR_FORMAT_INDEX8, + VSDC_COLOR_FORMAT_MONOCHROME, + VSDC_COLOR_FORMAT_YV12 = 0xf, + VSDC_COLOR_FORMAT_A8, + VSDC_COLOR_FORMAT_NV12, + VSDC_COLOR_FORMAT_NV16, + VSDC_COLOR_FORMAT_RG16, + VSDC_COLOR_FORMAT_R8, + VSDC_COLOR_FORMAT_NV12_10BIT, + VSDC_COLOR_FORMAT_A2R10G10B10, + VSDC_COLOR_FORMAT_NV16_10BIT, + VSDC_COLOR_FORMAT_INDEX1, + VSDC_COLOR_FORMAT_INDEX2, + VSDC_COLOR_FORMAT_INDEX4, + VSDC_COLOR_FORMAT_P010, + VSDC_COLOR_FORMAT_YUV444, + VSDC_COLOR_FORMAT_YUV444_10BIT +}; + +enum vs_swizzle { + VSDC_SWIZZLE_ARGB, + VSDC_SWIZZLE_RGBA, + VSDC_SWIZZLE_ABGR, + VSDC_SWIZZLE_BGRA, +}; + +struct vs_format { + enum vs_color_format color; + enum vs_swizzle swizzle; + bool uv_swizzle; +}; + +void drm_format_to_vs_format(u32 drm_format, struct vs_format *vs_format); +dma_addr_t vs_fb_get_dma_addr(struct drm_framebuffer *fb, + const struct drm_rect *src_rect); + +struct drm_plane *vs_primary_plane_init(struct drm_device *dev, struct vs_dc *dc); + +#endif /* _VS_PLANE_H_ */ diff --git 
a/drivers/gpu/drm/verisilicon/vs_primary_plane.c b/drivers/gpu/drm/verisilicon/vs_primary_plane.c new file mode 100644 index 000000000000..e8fcb5958615 --- /dev/null +++ b/drivers/gpu/drm/verisilicon/vs_primary_plane.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Icenowy Zheng + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vs_crtc.h" +#include "vs_plane.h" +#include "vs_dc.h" +#include "vs_primary_plane_regs.h" + +static int vs_primary_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_crtc *crtc = new_plane_state->crtc; + struct drm_crtc_state *crtc_state; + + if (!crtc) + return 0; + + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; + + return drm_atomic_helper_check_plane_state(new_plane_state, + crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + false, true); +} + +static void vs_primary_plane_commit(struct vs_dc *dc, unsigned int output) +{ + regmap_set_bits(dc->regs, VSDC_FB_CONFIG_EX(output), + VSDC_FB_CONFIG_EX_COMMIT); +} + +static void vs_primary_plane_atomic_enable(struct drm_plane *plane, + struct drm_atomic_state *atomic_state) +{ + struct drm_plane_state *state = drm_atomic_get_new_plane_state(atomic_state, + plane); + struct drm_crtc *crtc = state->crtc; + struct vs_crtc *vcrtc = drm_crtc_to_vs_crtc(crtc); + unsigned int output = vcrtc->id; + struct vs_dc *dc = vcrtc->dc; + + regmap_set_bits(dc->regs, VSDC_FB_CONFIG_EX(output), + VSDC_FB_CONFIG_EX_FB_EN); + regmap_update_bits(dc->regs, VSDC_FB_CONFIG_EX(output), + VSDC_FB_CONFIG_EX_DISPLAY_ID_MASK, + VSDC_FB_CONFIG_EX_DISPLAY_ID(output)); + + vs_primary_plane_commit(dc, output); +} + +static void vs_primary_plane_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state 
*atomic_state) +{ + struct drm_plane_state *state = drm_atomic_get_old_plane_state(atomic_state, + plane); + struct drm_crtc *crtc = state->crtc; + struct vs_crtc *vcrtc = drm_crtc_to_vs_crtc(crtc); + unsigned int output = vcrtc->id; + struct vs_dc *dc = vcrtc->dc; + + regmap_set_bits(dc->regs, VSDC_FB_CONFIG_EX(output), + VSDC_FB_CONFIG_EX_FB_EN); + + vs_primary_plane_commit(dc, output); +} + +static void vs_primary_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *atomic_state) +{ + struct drm_plane_state *state = drm_atomic_get_new_plane_state(atomic_state, + plane); + struct drm_framebuffer *fb = state->fb; + struct drm_crtc *crtc = state->crtc; + struct vs_dc *dc; + struct vs_crtc *vcrtc; + struct vs_format fmt; + unsigned int output; + dma_addr_t dma_addr; + + if (!state->visible) { + vs_primary_plane_atomic_disable(plane, atomic_state); + return; + } + + vcrtc = drm_crtc_to_vs_crtc(crtc); + output = vcrtc->id; + dc = vcrtc->dc; + + drm_format_to_vs_format(state->fb->format->format, &fmt); + + regmap_update_bits(dc->regs, VSDC_FB_CONFIG(output), + VSDC_FB_CONFIG_FMT_MASK, + VSDC_FB_CONFIG_FMT(fmt.color)); + regmap_update_bits(dc->regs, VSDC_FB_CONFIG(output), + VSDC_FB_CONFIG_SWIZZLE_MASK, + VSDC_FB_CONFIG_SWIZZLE(fmt.swizzle)); + regmap_assign_bits(dc->regs, VSDC_FB_CONFIG(output), + VSDC_FB_CONFIG_UV_SWIZZLE_EN, fmt.uv_swizzle); + + dma_addr = vs_fb_get_dma_addr(fb, &state->src); + + regmap_write(dc->regs, VSDC_FB_ADDRESS(output), + lower_32_bits(dma_addr)); + regmap_write(dc->regs, VSDC_FB_STRIDE(output), + fb->pitches[0]); + + regmap_write(dc->regs, VSDC_FB_TOP_LEFT(output), + VSDC_MAKE_PLANE_POS(state->crtc_x, state->crtc_y)); + regmap_write(dc->regs, VSDC_FB_BOTTOM_RIGHT(output), + VSDC_MAKE_PLANE_POS(state->crtc_x + state->crtc_w, + state->crtc_y + state->crtc_h)); + regmap_write(dc->regs, VSDC_FB_SIZE(output), + VSDC_MAKE_PLANE_SIZE(state->crtc_w, state->crtc_h)); + + regmap_write(dc->regs, VSDC_FB_BLEND_CONFIG(output), + 
VSDC_FB_BLEND_CONFIG_BLEND_DISABLE); + + vs_primary_plane_commit(dc, output); +} + +static const struct drm_plane_helper_funcs vs_primary_plane_helper_funcs = { + .atomic_check = vs_primary_plane_atomic_check, + .atomic_update = vs_primary_plane_atomic_update, + .atomic_enable = vs_primary_plane_atomic_enable, + .atomic_disable = vs_primary_plane_atomic_disable, +}; + +static const struct drm_plane_funcs vs_primary_plane_funcs = { + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .disable_plane = drm_atomic_helper_disable_plane, + .reset = drm_atomic_helper_plane_reset, + .update_plane = drm_atomic_helper_update_plane, +}; + +struct drm_plane *vs_primary_plane_init(struct drm_device *drm_dev, struct vs_dc *dc) +{ + struct drm_plane *plane; + + plane = drmm_universal_plane_alloc(drm_dev, struct drm_plane, dev, 0, + &vs_primary_plane_funcs, + dc->identity.formats->array, + dc->identity.formats->num, + NULL, + DRM_PLANE_TYPE_PRIMARY, + NULL); + + if (IS_ERR(plane)) + return plane; + + drm_plane_helper_add(plane, &vs_primary_plane_helper_funcs); + + return plane; +} diff --git a/drivers/gpu/drm/verisilicon/vs_primary_plane_regs.h b/drivers/gpu/drm/verisilicon/vs_primary_plane_regs.h new file mode 100644 index 000000000000..cbb125c46b39 --- /dev/null +++ b/drivers/gpu/drm/verisilicon/vs_primary_plane_regs.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 Icenowy Zheng + * + * Based on vs_dc_hw.h, which is: + * Copyright (C) 2023 VeriSilicon Holdings Co., Ltd. 
+ */ + +#ifndef _VS_PRIMARY_PLANE_REGS_H_ +#define _VS_PRIMARY_PLANE_REGS_H_ + +#include + +#define VSDC_FB_ADDRESS(n) (0x1400 + 0x4 * (n)) + +#define VSDC_FB_STRIDE(n) (0x1408 + 0x4 * (n)) + +#define VSDC_FB_CONFIG(n) (0x1518 + 0x4 * (n)) +#define VSDC_FB_CONFIG_CLEAR_EN BIT(8) +#define VSDC_FB_CONFIG_ROT_MASK GENMASK(13, 11) +#define VSDC_FB_CONFIG_ROT(v) ((v) << 11) +#define VSDC_FB_CONFIG_YUV_SPACE_MASK GENMASK(16, 14) +#define VSDC_FB_CONFIG_YUV_SPACE(v) ((v) << 14) +#define VSDC_FB_CONFIG_TILE_MODE_MASK GENMASK(21, 17) +#define VSDC_FB_CONFIG_TILE_MODE(v) ((v) << 14) +#define VSDC_FB_CONFIG_SCALE_EN BIT(22) +#define VSDC_FB_CONFIG_SWIZZLE_MASK GENMASK(24, 23) +#define VSDC_FB_CONFIG_SWIZZLE(v) ((v) << 23) +#define VSDC_FB_CONFIG_UV_SWIZZLE_EN BIT(25) +#define VSDC_FB_CONFIG_FMT_MASK GENMASK(31, 26) +#define VSDC_FB_CONFIG_FMT(v) ((v) << 26) + +#define VSDC_FB_SIZE(n) (0x1810 + 0x4 * (n)) +/* Fill with value generated with VSDC_MAKE_PLANE_SIZE(w, h) */ + +#define VSDC_FB_CONFIG_EX(n) (0x1CC0 + 0x4 * (n)) +#define VSDC_FB_CONFIG_EX_COMMIT BIT(12) +#define VSDC_FB_CONFIG_EX_FB_EN BIT(13) +#define VSDC_FB_CONFIG_EX_ZPOS_MASK GENMASK(18, 16) +#define VSDC_FB_CONFIG_EX_ZPOS(v) ((v) << 16) +#define VSDC_FB_CONFIG_EX_DISPLAY_ID_MASK GENMASK(19, 19) +#define VSDC_FB_CONFIG_EX_DISPLAY_ID(v) ((v) << 19) + +#define VSDC_FB_TOP_LEFT(n) (0x24D8 + 0x4 * (n)) +/* Fill with value generated with VSDC_MAKE_PLANE_POS(x, y) */ + +#define VSDC_FB_BOTTOM_RIGHT(n) (0x24E0 + 0x4 * (n)) +/* Fill with value generated with VSDC_MAKE_PLANE_POS(x, y) */ + +#define VSDC_FB_BLEND_CONFIG(n) (0x2510 + 0x4 * (n)) +#define VSDC_FB_BLEND_CONFIG_BLEND_DISABLE BIT(1) + +#endif /* _VS_PRIMARY_PLANE_REGS_H_ */ From 3d60ff99a78ccd3b72765542dd083b134d6ae4bb Mon Sep 17 00:00:00 2001 From: Icenowy Zheng Date: Thu, 29 Jan 2026 10:39:18 +0800 Subject: [PATCH 037/158] dt-bindings: display/bridge: add binding for TH1520 HDMI controller T-Head TH1520 SoC contains a Synopsys DesignWare HDMI controller paired 
with DesignWare HDMI PHY, with an extra clock gate for HDMI pixel clock and two reset controls. Add a device tree binding to it. Signed-off-by: Icenowy Zheng Signed-off-by: Icenowy Zheng Reviewed-by: Krzysztof Kozlowski Signed-off-by: Thomas Zimmermann Link: https://patch.msgid.link/20260129023922.1527729-5-zhengxingda@iscas.ac.cn --- .../display/bridge/thead,th1520-dw-hdmi.yaml | 120 ++++++++++++++++++ 1 file changed, 120 insertions(+) create mode 100644 Documentation/devicetree/bindings/display/bridge/thead,th1520-dw-hdmi.yaml diff --git a/Documentation/devicetree/bindings/display/bridge/thead,th1520-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/bridge/thead,th1520-dw-hdmi.yaml new file mode 100644 index 000000000000..68fff885ce15 --- /dev/null +++ b/Documentation/devicetree/bindings/display/bridge/thead,th1520-dw-hdmi.yaml @@ -0,0 +1,120 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/bridge/thead,th1520-dw-hdmi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: T-Head TH1520 DesignWare HDMI TX Encoder + +maintainers: + - Icenowy Zheng + +description: + The HDMI transmitter is a Synopsys DesignWare HDMI TX controller + paired with a DesignWare HDMI Gen2 TX PHY. 
+ +allOf: + - $ref: /schemas/display/bridge/synopsys,dw-hdmi.yaml# + +properties: + compatible: + enum: + - thead,th1520-dw-hdmi + + reg-io-width: + const: 4 + + clocks: + maxItems: 4 + + clock-names: + items: + - const: iahb + - const: isfr + - const: cec + - const: pix + + resets: + items: + - description: Main reset + - description: Configuration APB reset + + reset-names: + items: + - const: main + - const: apb + + ports: + $ref: /schemas/graph.yaml#/properties/ports + + properties: + port@0: + $ref: /schemas/graph.yaml#/properties/port + description: Input port connected to DC8200 DPU "DP" output + + port@1: + $ref: /schemas/graph.yaml#/properties/port + description: HDMI output port + + required: + - port@0 + - port@1 + +required: + - compatible + - reg + - reg-io-width + - clocks + - clock-names + - resets + - reset-names + - interrupts + - ports + +unevaluatedProperties: false + +examples: + - | + #include + #include + #include + + soc { + #address-cells = <2>; + #size-cells = <2>; + + hdmi@ffef540000 { + compatible = "thead,th1520-dw-hdmi"; + reg = <0xff 0xef540000 0x0 0x40000>; + reg-io-width = <4>; + interrupts = <111 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk_vo CLK_HDMI_PCLK>, + <&clk_vo CLK_HDMI_SFR>, + <&clk_vo CLK_HDMI_CEC>, + <&clk_vo CLK_HDMI_PIXCLK>; + clock-names = "iahb", "isfr", "cec", "pix"; + resets = <&rst_vo TH1520_RESET_ID_HDMI>, + <&rst_vo TH1520_RESET_ID_HDMI_APB>; + reset-names = "main", "apb"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + + hdmi_in: endpoint { + remote-endpoint = <&dpu_out_dp1>; + }; + }; + + port@1 { + reg = <1>; + + hdmi_out_conn: endpoint { + remote-endpoint = <&hdmi_conn_in>; + }; + }; + }; + }; + }; From 96f30ee0fb9db1663eb8fd55c12e4c67da8c4a90 Mon Sep 17 00:00:00 2001 From: Icenowy Zheng Date: Thu, 29 Jan 2026 10:39:19 +0800 Subject: [PATCH 038/158] drm/bridge: add a driver for T-Head TH1520 HDMI controller T-Head TH1520 SoC contains a Synopsys DesignWare HDMI controller (paired 
with DesignWare HDMI TX PHY Gen2) that takes the "DP" output from the display controller. Add a driver for this controller utilizing the common DesignWare HDMI code in the kernel. Signed-off-by: Icenowy Zheng Signed-off-by: Icenowy Zheng Tested-by: Han Gao Tested-by: Michal Wilczynski Acked-by: Thomas Zimmermann Signed-off-by: Thomas Zimmermann Link: https://patch.msgid.link/20260129023922.1527729-6-zhengxingda@iscas.ac.cn --- MAINTAINERS | 1 + drivers/gpu/drm/bridge/Kconfig | 10 ++ drivers/gpu/drm/bridge/Makefile | 1 + drivers/gpu/drm/bridge/th1520-dw-hdmi.c | 173 ++++++++++++++++++++++++ 4 files changed, 185 insertions(+) create mode 100644 drivers/gpu/drm/bridge/th1520-dw-hdmi.c diff --git a/MAINTAINERS b/MAINTAINERS index bd04bf63e8b0..dca1337b55df 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -22593,6 +22593,7 @@ F: Documentation/devicetree/bindings/reset/thead,th1520-reset.yaml F: arch/riscv/boot/dts/thead/ F: drivers/clk/thead/clk-th1520-ap.c F: drivers/firmware/thead,th1520-aon.c +F: drivers/gpu/drm/bridge/th1520-dw-hdmi.c F: drivers/mailbox/mailbox-th1520.c F: drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c F: drivers/pinctrl/pinctrl-th1520.c diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 39385deafc68..1cabfa1d2b2e 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -342,6 +342,16 @@ config DRM_THINE_THC63LVD1024 help Thine THC63LVD1024 LVDS/parallel converter driver. +config DRM_THEAD_TH1520_DW_HDMI + tristate "T-Head TH1520 DesignWare HDMI bridge" + depends on OF + depends on COMMON_CLK + depends on ARCH_THEAD || COMPILE_TEST + select DRM_DW_HDMI + help + Choose this to enable support for the internal HDMI bridge found + on the T-Head TH1520 SoC. 
+ config DRM_TOSHIBA_TC358762 tristate "TC358762 DSI/DPI bridge" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 909c21cc3acd..fb0cf0bf8875 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_DRM_SII902X) += sii902x.o obj-$(CONFIG_DRM_SII9234) += sii9234.o obj-$(CONFIG_DRM_SIMPLE_BRIDGE) += simple-bridge.o obj-$(CONFIG_DRM_SOLOMON_SSD2825) += ssd2825.o +obj-$(CONFIG_DRM_THEAD_TH1520_DW_HDMI) += th1520-dw-hdmi.o obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o obj-$(CONFIG_DRM_TOSHIBA_TC358762) += tc358762.o obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o diff --git a/drivers/gpu/drm/bridge/th1520-dw-hdmi.c b/drivers/gpu/drm/bridge/th1520-dw-hdmi.c new file mode 100644 index 000000000000..389eead5f1c4 --- /dev/null +++ b/drivers/gpu/drm/bridge/th1520-dw-hdmi.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2025 Icenowy Zheng + * + * Based on rcar_dw_hdmi.c, which is: + * Copyright (C) 2016 Renesas Electronics Corporation + * Based on imx8mp-hdmi-tx.c, which is: + * Copyright (C) 2022 Pengutronix, Lucas Stach + */ + +#include +#include +#include +#include +#include + +#include +#include + +#define TH1520_HDMI_PHY_OPMODE_PLLCFG 0x06 /* Mode of operation and PLL dividers */ +#define TH1520_HDMI_PHY_CKSYMTXCTRL 0x09 /* Clock Symbol and Transmitter Control Register */ +#define TH1520_HDMI_PHY_VLEVCTRL 0x0e /* Voltage Level Control Register */ +#define TH1520_HDMI_PHY_PLLCURRGMPCTRL 0x10 /* PLL current and Gmp (conductance) */ +#define TH1520_HDMI_PHY_PLLDIVCTRL 0x11 /* PLL dividers */ +#define TH1520_HDMI_PHY_TXTERM 0x19 /* Transmission Termination Register */ + +struct th1520_hdmi_phy_params { + unsigned long mpixelclock; + u16 opmode_pllcfg; + u16 pllcurrgmpctrl; + u16 plldivctrl; + u16 cksymtxctrl; + u16 vlevctrl; + u16 txterm; +}; + +static const struct th1520_hdmi_phy_params th1520_hdmi_phy_params[] = { + { 35500000, 
0x0003, 0x0283, 0x0628, 0x8088, 0x01a0, 0x0007 }, + { 44900000, 0x0003, 0x0285, 0x0228, 0x8088, 0x01a0, 0x0007 }, + { 71000000, 0x0002, 0x1183, 0x0614, 0x8088, 0x01a0, 0x0007 }, + { 90000000, 0x0002, 0x1142, 0x0214, 0x8088, 0x01a0, 0x0007 }, + { 121750000, 0x0001, 0x20c0, 0x060a, 0x8088, 0x01a0, 0x0007 }, + { 165000000, 0x0001, 0x2080, 0x020a, 0x8088, 0x01a0, 0x0007 }, + { 198000000, 0x0000, 0x3040, 0x0605, 0x83c8, 0x0120, 0x0004 }, + { 297000000, 0x0000, 0x3041, 0x0205, 0x81dc, 0x0200, 0x0005 }, + { 371250000, 0x0640, 0x3041, 0x0205, 0x80f6, 0x0140, 0x0000 }, + { 495000000, 0x0640, 0x3080, 0x0005, 0x80f6, 0x0140, 0x0000 }, + { 594000000, 0x0640, 0x3080, 0x0005, 0x80fa, 0x01e0, 0x0004 }, +}; + +struct th1520_hdmi { + struct dw_hdmi_plat_data plat_data; + struct dw_hdmi *dw_hdmi; + struct clk *pixclk; + struct reset_control *mainrst, *prst; +}; + +static enum drm_mode_status +th1520_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + /* + * The maximum supported clock frequency is 594 MHz, as shown in the PHY + * parameters table. 
+ */ + if (mode->clock > 594000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static void th1520_hdmi_phy_set_params(struct dw_hdmi *hdmi, + const struct th1520_hdmi_phy_params *params) +{ + dw_hdmi_phy_i2c_write(hdmi, params->opmode_pllcfg, + TH1520_HDMI_PHY_OPMODE_PLLCFG); + dw_hdmi_phy_i2c_write(hdmi, params->pllcurrgmpctrl, + TH1520_HDMI_PHY_PLLCURRGMPCTRL); + dw_hdmi_phy_i2c_write(hdmi, params->plldivctrl, + TH1520_HDMI_PHY_PLLDIVCTRL); + dw_hdmi_phy_i2c_write(hdmi, params->vlevctrl, + TH1520_HDMI_PHY_VLEVCTRL); + dw_hdmi_phy_i2c_write(hdmi, params->cksymtxctrl, + TH1520_HDMI_PHY_CKSYMTXCTRL); + dw_hdmi_phy_i2c_write(hdmi, params->txterm, + TH1520_HDMI_PHY_TXTERM); +} + +static int th1520_hdmi_phy_configure(struct dw_hdmi *hdmi, void *data, + unsigned long mpixelclock) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(th1520_hdmi_phy_params); i++) { + if (mpixelclock <= th1520_hdmi_phy_params[i].mpixelclock) { + th1520_hdmi_phy_set_params(hdmi, + &th1520_hdmi_phy_params[i]); + return 0; + } + } + + return -EINVAL; +} + +static int th1520_dw_hdmi_probe(struct platform_device *pdev) +{ + struct th1520_hdmi *hdmi; + struct dw_hdmi_plat_data *plat_data; + struct device *dev = &pdev->dev; + + hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) + return -ENOMEM; + + plat_data = &hdmi->plat_data; + + hdmi->pixclk = devm_clk_get_enabled(dev, "pix"); + if (IS_ERR(hdmi->pixclk)) + return dev_err_probe(dev, PTR_ERR(hdmi->pixclk), + "Unable to get pixel clock\n"); + + hdmi->mainrst = devm_reset_control_get_exclusive_deasserted(dev, "main"); + if (IS_ERR(hdmi->mainrst)) + return dev_err_probe(dev, PTR_ERR(hdmi->mainrst), + "Unable to get main reset\n"); + + hdmi->prst = devm_reset_control_get_exclusive_deasserted(dev, "apb"); + if (IS_ERR(hdmi->prst)) + return dev_err_probe(dev, PTR_ERR(hdmi->prst), + "Unable to get apb reset\n"); + + plat_data->output_port = 1; + plat_data->mode_valid = th1520_hdmi_mode_valid; + plat_data->configure_phy = 
th1520_hdmi_phy_configure; + plat_data->priv_data = hdmi; + + hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data); + if (IS_ERR(hdmi)) + return PTR_ERR(hdmi); + + platform_set_drvdata(pdev, hdmi); + + return 0; +} + +static void th1520_dw_hdmi_remove(struct platform_device *pdev) +{ + struct dw_hdmi *hdmi = platform_get_drvdata(pdev); + + dw_hdmi_remove(hdmi); +} + +static const struct of_device_id th1520_dw_hdmi_of_table[] = { + { .compatible = "thead,th1520-dw-hdmi" }, + { /* Sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, th1520_dw_hdmi_of_table); + +static struct platform_driver th1520_dw_hdmi_platform_driver = { + .probe = th1520_dw_hdmi_probe, + .remove = th1520_dw_hdmi_remove, + .driver = { + .name = "th1520-dw-hdmi", + .of_match_table = th1520_dw_hdmi_of_table, + }, +}; + +module_platform_driver(th1520_dw_hdmi_platform_driver); + +MODULE_AUTHOR("Icenowy Zheng "); +MODULE_DESCRIPTION("T-Head TH1520 HDMI Encoder Driver"); +MODULE_LICENSE("GPL"); From 2bcbc706dfa02ae50118173a6f6d8a12e735480c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 19 Dec 2025 11:41:54 +0100 Subject: [PATCH 039/158] dma-buf: add dma_fence_was_initialized function v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some driver use fence->ops to test if a fence was initialized or not. The problem is that this utilizes internal behavior of the dma_fence implementation. So better abstract that into a function. v2: use a flag instead of testing fence->ops, rename the function, move to the beginning of the patch set. 
Signed-off-by: Christian König Reviewed-by: Tvrtko Ursulin Link: https://lore.kernel.org/r/20260120105655.7134-2-christian.koenig@amd.com --- drivers/dma-buf/dma-fence.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 13 +++++++------ drivers/gpu/drm/qxl/qxl_release.c | 2 +- include/linux/dma-fence.h | 15 +++++++++++++++ 4 files changed, 24 insertions(+), 8 deletions(-) diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 21c5c30b4f34..c9a036b0d592 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -1054,7 +1054,7 @@ __dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, fence->lock = lock; fence->context = context; fence->seqno = seqno; - fence->flags = flags; + fence->flags = flags | BIT(DMA_FENCE_FLAG_INITIALIZED_BIT); fence->error = 0; trace_dma_fence_init(fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index aaf5477fcd7a..f05683d59f8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -282,9 +282,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) unsigned i; /* Check if any fences were initialized */ - if (job->base.s_fence && job->base.s_fence->finished.ops) + if (job->base.s_fence && + dma_fence_was_initialized(&job->base.s_fence->finished)) f = &job->base.s_fence->finished; - else if (job->hw_fence && job->hw_fence->base.ops) + else if (dma_fence_was_initialized(&job->hw_fence->base)) f = &job->hw_fence->base; else f = NULL; @@ -301,11 +302,11 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job) amdgpu_sync_free(&job->explicit_sync); - if (job->hw_fence->base.ops) + if (dma_fence_was_initialized(&job->hw_fence->base)) dma_fence_put(&job->hw_fence->base); else kfree(job->hw_fence); - if (job->hw_vm_fence->base.ops) + if (dma_fence_was_initialized(&job->hw_vm_fence->base)) dma_fence_put(&job->hw_vm_fence->base); else kfree(job->hw_vm_fence); @@ -339,11 +340,11 @@ 
void amdgpu_job_free(struct amdgpu_job *job) if (job->gang_submit != &job->base.s_fence->scheduled) dma_fence_put(job->gang_submit); - if (job->hw_fence->base.ops) + if (dma_fence_was_initialized(&job->hw_fence->base)) dma_fence_put(&job->hw_fence->base); else kfree(job->hw_fence); - if (job->hw_vm_fence->base.ops) + if (dma_fence_was_initialized(&job->hw_vm_fence->base)) dma_fence_put(&job->hw_vm_fence->base); else kfree(job->hw_vm_fence); diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 7b3c9a6016db..06b0b2aa7953 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -146,7 +146,7 @@ qxl_release_free(struct qxl_device *qdev, idr_remove(&qdev->release_idr, release->id); spin_unlock(&qdev->release_idr_lock); - if (release->base.ops) { + if (dma_fence_was_initialized(&release->base)) { WARN_ON(list_empty(&release->bos)); qxl_release_free_list(release); diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index d4c92fd35092..9c4d25289239 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -48,6 +48,7 @@ struct seq_file; * atomic ops (bit_*), so taking the spinlock will not be needed most * of the time. 
 *
+ * DMA_FENCE_FLAG_INITIALIZED_BIT - fence was initialized
  * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
  * DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling
  * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
@@ -98,6 +99,7 @@ struct dma_fence {
 };
 
 enum dma_fence_flag_bits {
+	DMA_FENCE_FLAG_INITIALIZED_BIT,
 	DMA_FENCE_FLAG_SEQNO64_BIT,
 	DMA_FENCE_FLAG_SIGNALED_BIT,
 	DMA_FENCE_FLAG_TIMESTAMP_BIT,
@@ -263,6 +265,19 @@ void dma_fence_release(struct kref *kref);
 void dma_fence_free(struct dma_fence *fence);
 void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq);
 
+/**
+ * dma_fence_was_initialized - test if fence was initialized
+ * @fence: fence to test
+ *
+ * Return: True if fence was ever initialized, false otherwise. Works correctly
+ * only when memory backing the fence structure is zero initialized on
+ * allocation.
+ */
+static inline bool dma_fence_was_initialized(struct dma_fence *fence)
+{
+	return fence && test_bit(DMA_FENCE_FLAG_INITIALIZED_BIT, &fence->flags);
+}
+
 /**
  * dma_fence_put - decreases refcount of the fence
  * @fence: fence to reduce refcount of

From 779ec12c85c9e4547519e3903a371a3b26a289de Mon Sep 17 00:00:00 2001
From: Alexander Konyukhov
Date: Tue, 3 Feb 2026 16:48:46 +0300
Subject: [PATCH 040/158] drm/komeda: fix integer overflow in AFBC framebuffer
 size check

The AFBC framebuffer size validation calculates the minimum required
buffer size by adding the AFBC payload size to the framebuffer offset.
This addition is performed without checking for integer overflow.

If the addition overflows, the size check may incorrectly succeed and
allow userspace to provide an undersized drm_gem_object, potentially
leading to out-of-bounds memory access.

Add usage of check_add_overflow() to safely compute the minimum
required size and reject the framebuffer if an overflow is detected.
This makes the AFBC size validation more robust against malformed
input.
Found by Linux Verification Center (linuxtesting.org) with SVACE. Fixes: 65ad2392dd6d ("drm/komeda: Added AFBC support for komeda driver") Signed-off-by: Alexander Konyukhov Acked-by: Liviu Dudau Signed-off-by: Liviu Dudau Link: https://lore.kernel.org/r/20260203134907.1587067-1-Alexander.Konyukhov@kaspersky.com --- drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c index 3ca461eb0a24..3cb34d03f7f8 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c @@ -4,6 +4,8 @@ * Author: James.Qian.Wang * */ +#include + #include #include #include @@ -93,7 +95,9 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file, kfb->afbc_size = kfb->offset_payload + n_blocks * ALIGN(bpp * AFBC_SUPERBLK_PIXELS / 8, AFBC_SUPERBLK_ALIGNMENT); - min_size = kfb->afbc_size + fb->offsets[0]; + if (check_add_overflow(kfb->afbc_size, fb->offsets[0], &min_size)) { + goto check_failed; + } if (min_size > obj->size) { DRM_DEBUG_KMS("afbc size check failed, obj_size: 0x%zx. min_size 0x%llx.\n", obj->size, min_size); From 4a9671a03f2be13acde0cb15c5208767a9cc56e4 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Fri, 6 Feb 2026 08:52:38 +1000 Subject: [PATCH 041/158] gpu: Move DRM buddy allocator one level up (part one) Move the DRM buddy allocator one level up so that it can be used by GPU drivers (example, nova-core) that have usecases other than DRM (such as VFIO vGPU support). Modify the API, structures and Kconfigs to use "gpu_buddy" terminology. Adapt the drivers and tests to use the new API. The commit cannot be split due to bisectability, however no functional change is intended. Verified by running K-UNIT tests and build tested various configurations. 
Signed-off-by: Joel Fernandes Reviewed-by: Dave Airlie [airlied: I've split this into two so git can find copies easier. I've also just nuked drm_random library, that stuff needs to be done elsewhere and only the buddy tests seem to be using it]. Signed-off-by: Dave Airlie --- Documentation/gpu/drm-mm.rst | 6 +++--- drivers/gpu/Makefile | 2 +- drivers/gpu/{drm/drm_buddy.c => buddy.c} | 2 +- drivers/gpu/drm/Kconfig | 4 ---- drivers/gpu/drm/Kconfig.debug | 1 - drivers/gpu/drm/Makefile | 3 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h | 2 +- drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 2 +- drivers/gpu/drm/i915/i915_scatterlist.c | 2 +- drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 2 +- drivers/gpu/drm/tests/Makefile | 1 - drivers/gpu/drm/tests/drm_exec_test.c | 2 -- drivers/gpu/drm/tests/drm_mm_test.c | 2 -- drivers/gpu/drm/ttm/tests/ttm_mock_manager.h | 2 +- drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h | 2 +- drivers/gpu/tests/Makefile | 4 ++++ .../{drm/tests/drm_buddy_test.c => tests/gpu_buddy_test.c} | 4 ++-- drivers/gpu/{drm/lib/drm_random.c => tests/gpu_random.c} | 2 +- drivers/gpu/{drm/lib/drm_random.h => tests/gpu_random.h} | 0 include/{drm/drm_buddy.h => linux/gpu_buddy.h} | 0 20 files changed, 19 insertions(+), 26 deletions(-) rename drivers/gpu/{drm/drm_buddy.c => buddy.c} (99%) create mode 100644 drivers/gpu/tests/Makefile rename drivers/gpu/{drm/tests/drm_buddy_test.c => tests/gpu_buddy_test.c} (99%) rename drivers/gpu/{drm/lib/drm_random.c => tests/gpu_random.c} (97%) rename drivers/gpu/{drm/lib/drm_random.h => tests/gpu_random.h} (100%) rename include/{drm/drm_buddy.h => linux/gpu_buddy.h} (100%) diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst index f22433470c76..ceee0e663237 100644 --- a/Documentation/gpu/drm-mm.rst +++ b/Documentation/gpu/drm-mm.rst @@ -526,10 +526,10 @@ DRM GPUVM Function References DRM Buddy Allocator =================== -DRM Buddy Function References ------------------------------ +Buddy Allocator Function 
References (GPU buddy) +----------------------------------------------- -.. kernel-doc:: drivers/gpu/drm/drm_buddy.c +.. kernel-doc:: drivers/gpu/buddy.c :export: DRM Cache Handling and Fast WC memcpy() diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile index 36a54d456630..c5292ee2c852 100644 --- a/drivers/gpu/Makefile +++ b/drivers/gpu/Makefile @@ -2,7 +2,7 @@ # drm/tegra depends on host1x, so if both drivers are built-in care must be # taken to initialize them in the correct order. Link order is the only way # to ensure this currently. -obj-y += host1x/ drm/ vga/ +obj-y += host1x/ drm/ vga/ tests/ obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/ obj-$(CONFIG_TRACE_GPU_MEM) += trace/ obj-$(CONFIG_NOVA_CORE) += nova-core/ diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/buddy.c similarity index 99% rename from drivers/gpu/drm/drm_buddy.c rename to drivers/gpu/buddy.c index fd34d3755f7c..4cc63d961d26 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/buddy.c @@ -10,7 +10,7 @@ #include #include -#include +#include #include enum drm_buddy_free_tree { diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 5888eb147ed1..862ff4000969 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -269,10 +269,6 @@ config DRM_SCHED config DRM_PANEL_BACKLIGHT_QUIRKS tristate -config DRM_LIB_RANDOM - bool - default n - config DRM_PRIVACY_SCREEN bool default n diff --git a/drivers/gpu/drm/Kconfig.debug b/drivers/gpu/drm/Kconfig.debug index 05dc43c0b8c5..3b7886865335 100644 --- a/drivers/gpu/drm/Kconfig.debug +++ b/drivers/gpu/drm/Kconfig.debug @@ -69,7 +69,6 @@ config DRM_KUNIT_TEST select DRM_EXPORT_FOR_TESTS if m select DRM_GEM_SHMEM_HELPER select DRM_KUNIT_TEST_HELPERS - select DRM_LIB_RANDOM select DRM_SYSFB_HELPER select PRIME_NUMBERS default KUNIT_ALL_TESTS diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 75840ec4d782..892859cfe95f 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -79,7 +79,6 @@ 
drm-$(CONFIG_DRM_CLIENT) += \ drm_client_event.o \ drm_client_modeset.o \ drm_client_sysrq.o -drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_PANEL) += drm_panel.o drm-$(CONFIG_OF) += drm_of.o @@ -115,7 +114,7 @@ drm_gpusvm_helper-$(CONFIG_ZONE_DEVICE) += \ obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o -obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o +obj-$(CONFIG_DRM_BUDDY) += ../buddy.o drm_dma_helper-y := drm_gem_dma_helper.o drm_dma_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_dma.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h index 5f5fd9a911c2..874779618056 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h @@ -24,7 +24,7 @@ #ifndef __AMDGPU_VRAM_MGR_H__ #define __AMDGPU_VRAM_MGR_H__ -#include +#include struct amdgpu_vram_mgr { struct ttm_resource_manager manager; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index f65fe86c02b5..eeda5daa544f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -5,7 +5,7 @@ #include -#include +#include #include #include #include diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c index 4d830740946d..30246f02bcfe 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.c +++ b/drivers/gpu/drm/i915/i915_scatterlist.c @@ -7,7 +7,7 @@ #include "i915_scatterlist.h" #include "i915_ttm_buddy_manager.h" -#include +#include #include #include diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c index d5c6e6605086..6b256d95badd 100644 --- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c +++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c @@ -5,7 +5,7 @@ #include -#include +#include #include #include #include diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile 
index 87d5d5f9332a..d2e2e3d8349a 100644 --- a/drivers/gpu/drm/tests/Makefile +++ b/drivers/gpu/drm/tests/Makefile @@ -7,7 +7,6 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \ drm_atomic_test.o \ drm_atomic_state_test.o \ drm_bridge_test.o \ - drm_buddy_test.o \ drm_cmdline_parser_test.o \ drm_connector_test.o \ drm_damage_helper_test.o \ diff --git a/drivers/gpu/drm/tests/drm_exec_test.c b/drivers/gpu/drm/tests/drm_exec_test.c index 3a20c788c51f..2fc47f3b463b 100644 --- a/drivers/gpu/drm/tests/drm_exec_test.c +++ b/drivers/gpu/drm/tests/drm_exec_test.c @@ -16,8 +16,6 @@ #include #include -#include "../lib/drm_random.h" - struct drm_exec_priv { struct device *dev; struct drm_device *drm; diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c index aec9eccdeae9..e24a619059d8 100644 --- a/drivers/gpu/drm/tests/drm_mm_test.c +++ b/drivers/gpu/drm/tests/drm_mm_test.c @@ -16,8 +16,6 @@ #include #include -#include "../lib/drm_random.h" - enum { BEST, BOTTOMUP, diff --git a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h index e4c95f86a467..96ea8c9aae34 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h +++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h @@ -5,7 +5,7 @@ #ifndef TTM_MOCK_MANAGER_H #define TTM_MOCK_MANAGER_H -#include +#include struct ttm_mock_manager { struct ttm_resource_manager man; diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h index a71e14818ec2..babeec5511d9 100644 --- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h +++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h @@ -6,7 +6,7 @@ #ifndef _XE_TTM_VRAM_MGR_TYPES_H_ #define _XE_TTM_VRAM_MGR_TYPES_H_ -#include +#include #include /** diff --git a/drivers/gpu/tests/Makefile b/drivers/gpu/tests/Makefile new file mode 100644 index 000000000000..8e7654e87d82 --- /dev/null +++ b/drivers/gpu/tests/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 + +gpu_buddy_tests-y = 
gpu_buddy_test.o gpu_random.o +obj-$(CONFIG_DRM_KUNIT_TEST) += gpu_buddy_tests.o diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/tests/gpu_buddy_test.c similarity index 99% rename from drivers/gpu/drm/tests/drm_buddy_test.c rename to drivers/gpu/tests/gpu_buddy_test.c index e6f8459c6c54..b905932da990 100644 --- a/drivers/gpu/drm/tests/drm_buddy_test.c +++ b/drivers/gpu/tests/gpu_buddy_test.c @@ -10,9 +10,9 @@ #include #include -#include +#include -#include "../lib/drm_random.h" +#include "gpu_random.h" static unsigned int random_seed; diff --git a/drivers/gpu/drm/lib/drm_random.c b/drivers/gpu/tests/gpu_random.c similarity index 97% rename from drivers/gpu/drm/lib/drm_random.c rename to drivers/gpu/tests/gpu_random.c index 0e9dba1ef4af..ddd1f594b5d5 100644 --- a/drivers/gpu/drm/lib/drm_random.c +++ b/drivers/gpu/tests/gpu_random.c @@ -6,7 +6,7 @@ #include #include -#include "drm_random.h" +#include "gpu_random.h" u32 drm_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state) { diff --git a/drivers/gpu/drm/lib/drm_random.h b/drivers/gpu/tests/gpu_random.h similarity index 100% rename from drivers/gpu/drm/lib/drm_random.h rename to drivers/gpu/tests/gpu_random.h diff --git a/include/drm/drm_buddy.h b/include/linux/gpu_buddy.h similarity index 100% rename from include/drm/drm_buddy.h rename to include/linux/gpu_buddy.h From ba110db8e1bc206c13fd7d985e79b033f53bfdea Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Fri, 6 Feb 2026 08:52:38 +1000 Subject: [PATCH 042/158] gpu: Move DRM buddy allocator one level up (part two) Move the DRM buddy allocator one level up so that it can be used by GPU drivers (example, nova-core) that have usecases other than DRM (such as VFIO vGPU support). Modify the API, structures and Kconfigs to use "gpu_buddy" terminology. Adapt the drivers and tests to use the new API. The commit cannot be split due to bisectability, however no functional change is intended. 
Verified by running K-UNIT tests and build tested various configurations. Signed-off-by: Joel Fernandes Reviewed-by: Dave Airlie [airlied: I've split this into two so git can find copies easier. I've also just nuked drm_random library, that stuff needs to be done elsewhere and only the buddy tests seem to be using it]. Signed-off-by: Dave Airlie --- Documentation/gpu/drm-mm.rst | 6 + MAINTAINERS | 8 +- drivers/gpu/Kconfig | 13 + drivers/gpu/Makefile | 1 + drivers/gpu/buddy.c | 556 +++++++++--------- drivers/gpu/drm/Kconfig | 1 + drivers/gpu/drm/Makefile | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 2 +- .../gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 12 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 79 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h | 18 +- drivers/gpu/drm/drm_buddy.c | 77 +++ drivers/gpu/drm/i915/i915_scatterlist.c | 8 +- drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 55 +- drivers/gpu/drm/i915/i915_ttm_buddy_manager.h | 4 +- .../drm/i915/selftests/intel_memory_region.c | 20 +- .../gpu/drm/ttm/tests/ttm_bo_validate_test.c | 4 +- drivers/gpu/drm/ttm/tests/ttm_mock_manager.c | 18 +- drivers/gpu/drm/ttm/tests/ttm_mock_manager.h | 2 +- drivers/gpu/drm/xe/xe_res_cursor.h | 34 +- drivers/gpu/drm/xe/xe_svm.c | 12 +- drivers/gpu/drm/xe/xe_ttm_vram_mgr.c | 71 +-- drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h | 2 +- drivers/gpu/tests/Makefile | 2 +- drivers/gpu/tests/gpu_buddy_test.c | 412 ++++++------- drivers/gpu/tests/gpu_random.c | 16 +- drivers/gpu/tests/gpu_random.h | 18 +- drivers/video/Kconfig | 1 + include/drm/drm_buddy.h | 18 + include/linux/gpu_buddy.h | 120 ++-- 30 files changed, 853 insertions(+), 739 deletions(-) create mode 100644 drivers/gpu/Kconfig create mode 100644 drivers/gpu/drm/drm_buddy.c create mode 100644 include/drm/drm_buddy.h diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst index ceee0e663237..32fb506db05b 100644 --- a/Documentation/gpu/drm-mm.rst +++ b/Documentation/gpu/drm-mm.rst @@ -532,6 +532,12 @@ 
Buddy Allocator Function References (GPU buddy) .. kernel-doc:: drivers/gpu/buddy.c :export: +DRM Buddy Specific Logging Function References +---------------------------------------------- + +.. kernel-doc:: drivers/gpu/drm/drm_buddy.c + :export: + DRM Cache Handling and Fast WC memcpy() ======================================= diff --git a/MAINTAINERS b/MAINTAINERS index 086cbf5c36b3..f2bec2c0d7e3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8797,15 +8797,17 @@ T: git https://gitlab.freedesktop.org/drm/misc/kernel.git F: drivers/gpu/drm/ttm/ F: include/drm/ttm/ -DRM BUDDY ALLOCATOR +GPU BUDDY ALLOCATOR M: Matthew Auld M: Arun Pravin R: Christian Koenig L: dri-devel@lists.freedesktop.org S: Maintained T: git https://gitlab.freedesktop.org/drm/misc/kernel.git -F: drivers/gpu/drm/drm_buddy.c -F: drivers/gpu/drm/tests/drm_buddy_test.c +F: drivers/gpu/drm_buddy.c +F: drivers/gpu/buddy.c +F: drivers/gpu/tests/gpu_buddy_test.c +F: include/linux/gpu_buddy.h F: include/drm/drm_buddy.h DRM AUTOMATED TESTING diff --git a/drivers/gpu/Kconfig b/drivers/gpu/Kconfig new file mode 100644 index 000000000000..ebb2ad4b7ea0 --- /dev/null +++ b/drivers/gpu/Kconfig @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 + +config GPU_BUDDY + bool + help + A page based buddy allocator for GPU memory. + +config GPU_BUDDY_KUNIT_TEST + tristate "KUnit tests for GPU buddy allocator" if !KUNIT_ALL_TESTS + depends on GPU_BUDDY && KUNIT + default KUNIT_ALL_TESTS + help + KUnit tests for the GPU buddy allocator. 
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile index c5292ee2c852..5cd54d06e262 100644 --- a/drivers/gpu/Makefile +++ b/drivers/gpu/Makefile @@ -6,3 +6,4 @@ obj-y += host1x/ drm/ vga/ tests/ obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/ obj-$(CONFIG_TRACE_GPU_MEM) += trace/ obj-$(CONFIG_NOVA_CORE) += nova-core/ +obj-$(CONFIG_GPU_BUDDY) += buddy.o diff --git a/drivers/gpu/buddy.c b/drivers/gpu/buddy.c index 4cc63d961d26..603c59a2013a 100644 --- a/drivers/gpu/buddy.c +++ b/drivers/gpu/buddy.c @@ -11,27 +11,17 @@ #include #include -#include - -enum drm_buddy_free_tree { - DRM_BUDDY_CLEAR_TREE = 0, - DRM_BUDDY_DIRTY_TREE, - DRM_BUDDY_MAX_FREE_TREES, -}; static struct kmem_cache *slab_blocks; -#define for_each_free_tree(tree) \ - for ((tree) = 0; (tree) < DRM_BUDDY_MAX_FREE_TREES; (tree)++) - -static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm, - struct drm_buddy_block *parent, +static struct gpu_buddy_block *gpu_block_alloc(struct gpu_buddy *mm, + struct gpu_buddy_block *parent, unsigned int order, u64 offset) { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; - BUG_ON(order > DRM_BUDDY_MAX_ORDER); + BUG_ON(order > GPU_BUDDY_MAX_ORDER); block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL); if (!block) @@ -43,30 +33,30 @@ static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm, RB_CLEAR_NODE(&block->rb); - BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED); + BUG_ON(block->header & GPU_BUDDY_HEADER_UNUSED); return block; } -static void drm_block_free(struct drm_buddy *mm, - struct drm_buddy_block *block) +static void gpu_block_free(struct gpu_buddy *mm, + struct gpu_buddy_block *block) { kmem_cache_free(slab_blocks, block); } -static enum drm_buddy_free_tree -get_block_tree(struct drm_buddy_block *block) +static enum gpu_buddy_free_tree +get_block_tree(struct gpu_buddy_block *block) { - return drm_buddy_block_is_clear(block) ? - DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; + return gpu_buddy_block_is_clear(block) ? 
+ GPU_BUDDY_CLEAR_TREE : GPU_BUDDY_DIRTY_TREE; } -static struct drm_buddy_block * +static struct gpu_buddy_block * rbtree_get_free_block(const struct rb_node *node) { - return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL; + return node ? rb_entry(node, struct gpu_buddy_block, rb) : NULL; } -static struct drm_buddy_block * +static struct gpu_buddy_block * rbtree_last_free_block(struct rb_root *root) { return rbtree_get_free_block(rb_last(root)); @@ -77,33 +67,33 @@ static bool rbtree_is_empty(struct rb_root *root) return RB_EMPTY_ROOT(root); } -static bool drm_buddy_block_offset_less(const struct drm_buddy_block *block, - const struct drm_buddy_block *node) +static bool gpu_buddy_block_offset_less(const struct gpu_buddy_block *block, + const struct gpu_buddy_block *node) { - return drm_buddy_block_offset(block) < drm_buddy_block_offset(node); + return gpu_buddy_block_offset(block) < gpu_buddy_block_offset(node); } static bool rbtree_block_offset_less(struct rb_node *block, const struct rb_node *node) { - return drm_buddy_block_offset_less(rbtree_get_free_block(block), + return gpu_buddy_block_offset_less(rbtree_get_free_block(block), rbtree_get_free_block(node)); } -static void rbtree_insert(struct drm_buddy *mm, - struct drm_buddy_block *block, - enum drm_buddy_free_tree tree) +static void rbtree_insert(struct gpu_buddy *mm, + struct gpu_buddy_block *block, + enum gpu_buddy_free_tree tree) { rb_add(&block->rb, - &mm->free_trees[tree][drm_buddy_block_order(block)], + &mm->free_trees[tree][gpu_buddy_block_order(block)], rbtree_block_offset_less); } -static void rbtree_remove(struct drm_buddy *mm, - struct drm_buddy_block *block) +static void rbtree_remove(struct gpu_buddy *mm, + struct gpu_buddy_block *block) { - unsigned int order = drm_buddy_block_order(block); - enum drm_buddy_free_tree tree; + unsigned int order = gpu_buddy_block_order(block); + enum gpu_buddy_free_tree tree; struct rb_root *root; tree = get_block_tree(block); @@ -113,42 +103,42 @@ 
static void rbtree_remove(struct drm_buddy *mm, RB_CLEAR_NODE(&block->rb); } -static void clear_reset(struct drm_buddy_block *block) +static void clear_reset(struct gpu_buddy_block *block) { - block->header &= ~DRM_BUDDY_HEADER_CLEAR; + block->header &= ~GPU_BUDDY_HEADER_CLEAR; } -static void mark_cleared(struct drm_buddy_block *block) +static void mark_cleared(struct gpu_buddy_block *block) { - block->header |= DRM_BUDDY_HEADER_CLEAR; + block->header |= GPU_BUDDY_HEADER_CLEAR; } -static void mark_allocated(struct drm_buddy *mm, - struct drm_buddy_block *block) +static void mark_allocated(struct gpu_buddy *mm, + struct gpu_buddy_block *block) { - block->header &= ~DRM_BUDDY_HEADER_STATE; - block->header |= DRM_BUDDY_ALLOCATED; + block->header &= ~GPU_BUDDY_HEADER_STATE; + block->header |= GPU_BUDDY_ALLOCATED; rbtree_remove(mm, block); } -static void mark_free(struct drm_buddy *mm, - struct drm_buddy_block *block) +static void mark_free(struct gpu_buddy *mm, + struct gpu_buddy_block *block) { - enum drm_buddy_free_tree tree; + enum gpu_buddy_free_tree tree; - block->header &= ~DRM_BUDDY_HEADER_STATE; - block->header |= DRM_BUDDY_FREE; + block->header &= ~GPU_BUDDY_HEADER_STATE; + block->header |= GPU_BUDDY_FREE; tree = get_block_tree(block); rbtree_insert(mm, block, tree); } -static void mark_split(struct drm_buddy *mm, - struct drm_buddy_block *block) +static void mark_split(struct gpu_buddy *mm, + struct gpu_buddy_block *block) { - block->header &= ~DRM_BUDDY_HEADER_STATE; - block->header |= DRM_BUDDY_SPLIT; + block->header &= ~GPU_BUDDY_HEADER_STATE; + block->header |= GPU_BUDDY_SPLIT; rbtree_remove(mm, block); } @@ -163,10 +153,10 @@ static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2) return s1 <= s2 && e1 >= e2; } -static struct drm_buddy_block * -__get_buddy(struct drm_buddy_block *block) +static struct gpu_buddy_block * +__get_buddy(struct gpu_buddy_block *block) { - struct drm_buddy_block *parent; + struct gpu_buddy_block *parent; parent = 
block->parent; if (!parent) @@ -178,19 +168,19 @@ __get_buddy(struct drm_buddy_block *block) return parent->left; } -static unsigned int __drm_buddy_free(struct drm_buddy *mm, - struct drm_buddy_block *block, +static unsigned int __gpu_buddy_free(struct gpu_buddy *mm, + struct gpu_buddy_block *block, bool force_merge) { - struct drm_buddy_block *parent; + struct gpu_buddy_block *parent; unsigned int order; while ((parent = block->parent)) { - struct drm_buddy_block *buddy; + struct gpu_buddy_block *buddy; buddy = __get_buddy(block); - if (!drm_buddy_block_is_free(buddy)) + if (!gpu_buddy_block_is_free(buddy)) break; if (!force_merge) { @@ -198,31 +188,31 @@ static unsigned int __drm_buddy_free(struct drm_buddy *mm, * Check the block and its buddy clear state and exit * the loop if they both have the dissimilar state. */ - if (drm_buddy_block_is_clear(block) != - drm_buddy_block_is_clear(buddy)) + if (gpu_buddy_block_is_clear(block) != + gpu_buddy_block_is_clear(buddy)) break; - if (drm_buddy_block_is_clear(block)) + if (gpu_buddy_block_is_clear(block)) mark_cleared(parent); } rbtree_remove(mm, buddy); - if (force_merge && drm_buddy_block_is_clear(buddy)) - mm->clear_avail -= drm_buddy_block_size(mm, buddy); + if (force_merge && gpu_buddy_block_is_clear(buddy)) + mm->clear_avail -= gpu_buddy_block_size(mm, buddy); - drm_block_free(mm, block); - drm_block_free(mm, buddy); + gpu_block_free(mm, block); + gpu_block_free(mm, buddy); block = parent; } - order = drm_buddy_block_order(block); + order = gpu_buddy_block_order(block); mark_free(mm, block); return order; } -static int __force_merge(struct drm_buddy *mm, +static int __force_merge(struct gpu_buddy *mm, u64 start, u64 end, unsigned int min_order) @@ -241,7 +231,7 @@ static int __force_merge(struct drm_buddy *mm, struct rb_node *iter = rb_last(&mm->free_trees[tree][i]); while (iter) { - struct drm_buddy_block *block, *buddy; + struct gpu_buddy_block *block, *buddy; u64 block_start, block_end; block = 
rbtree_get_free_block(iter); @@ -250,18 +240,18 @@ static int __force_merge(struct drm_buddy *mm, if (!block || !block->parent) continue; - block_start = drm_buddy_block_offset(block); - block_end = block_start + drm_buddy_block_size(mm, block) - 1; + block_start = gpu_buddy_block_offset(block); + block_end = block_start + gpu_buddy_block_size(mm, block) - 1; if (!contains(start, end, block_start, block_end)) continue; buddy = __get_buddy(block); - if (!drm_buddy_block_is_free(buddy)) + if (!gpu_buddy_block_is_free(buddy)) continue; - WARN_ON(drm_buddy_block_is_clear(block) == - drm_buddy_block_is_clear(buddy)); + WARN_ON(gpu_buddy_block_is_clear(block) == + gpu_buddy_block_is_clear(buddy)); /* * Advance to the next node when the current node is the buddy, @@ -271,10 +261,10 @@ static int __force_merge(struct drm_buddy *mm, iter = rb_prev(iter); rbtree_remove(mm, block); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail -= drm_buddy_block_size(mm, block); + if (gpu_buddy_block_is_clear(block)) + mm->clear_avail -= gpu_buddy_block_size(mm, block); - order = __drm_buddy_free(mm, block, true); + order = __gpu_buddy_free(mm, block, true); if (order >= min_order) return 0; } @@ -285,9 +275,9 @@ static int __force_merge(struct drm_buddy *mm, } /** - * drm_buddy_init - init memory manager + * gpu_buddy_init - init memory manager * - * @mm: DRM buddy manager to initialize + * @mm: GPU buddy manager to initialize * @size: size in bytes to manage * @chunk_size: minimum page size in bytes for our allocations * @@ -296,7 +286,7 @@ static int __force_merge(struct drm_buddy *mm, * Returns: * 0 on success, error code on failure. 
*/ -int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) +int gpu_buddy_init(struct gpu_buddy *mm, u64 size, u64 chunk_size) { unsigned int i, j, root_count = 0; u64 offset = 0; @@ -318,9 +308,9 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) mm->chunk_size = chunk_size; mm->max_order = ilog2(size) - ilog2(chunk_size); - BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER); + BUG_ON(mm->max_order > GPU_BUDDY_MAX_ORDER); - mm->free_trees = kmalloc_array(DRM_BUDDY_MAX_FREE_TREES, + mm->free_trees = kmalloc_array(GPU_BUDDY_MAX_FREE_TREES, sizeof(*mm->free_trees), GFP_KERNEL); if (!mm->free_trees) @@ -340,7 +330,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) mm->n_roots = hweight64(size); mm->roots = kmalloc_array(mm->n_roots, - sizeof(struct drm_buddy_block *), + sizeof(struct gpu_buddy_block *), GFP_KERNEL); if (!mm->roots) goto out_free_tree; @@ -350,21 +340,21 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) * not itself a power-of-two. 
*/ do { - struct drm_buddy_block *root; + struct gpu_buddy_block *root; unsigned int order; u64 root_size; order = ilog2(size) - ilog2(chunk_size); root_size = chunk_size << order; - root = drm_block_alloc(mm, NULL, order, offset); + root = gpu_block_alloc(mm, NULL, order, offset); if (!root) goto out_free_roots; mark_free(mm, root); BUG_ON(root_count > mm->max_order); - BUG_ON(drm_buddy_block_size(mm, root) < chunk_size); + BUG_ON(gpu_buddy_block_size(mm, root) < chunk_size); mm->roots[root_count] = root; @@ -377,7 +367,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) out_free_roots: while (root_count--) - drm_block_free(mm, mm->roots[root_count]); + gpu_block_free(mm, mm->roots[root_count]); kfree(mm->roots); out_free_tree: while (i--) @@ -385,16 +375,16 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) kfree(mm->free_trees); return -ENOMEM; } -EXPORT_SYMBOL(drm_buddy_init); +EXPORT_SYMBOL(gpu_buddy_init); /** - * drm_buddy_fini - tear down the memory manager + * gpu_buddy_fini - tear down the memory manager * - * @mm: DRM buddy manager to free + * @mm: GPU buddy manager to free * * Cleanup memory manager resources and the freetree */ -void drm_buddy_fini(struct drm_buddy *mm) +void gpu_buddy_fini(struct gpu_buddy *mm) { u64 root_size, size, start; unsigned int order; @@ -404,13 +394,13 @@ void drm_buddy_fini(struct drm_buddy *mm) for (i = 0; i < mm->n_roots; ++i) { order = ilog2(size) - ilog2(mm->chunk_size); - start = drm_buddy_block_offset(mm->roots[i]); + start = gpu_buddy_block_offset(mm->roots[i]); __force_merge(mm, start, start + size, order); - if (WARN_ON(!drm_buddy_block_is_free(mm->roots[i]))) + if (WARN_ON(!gpu_buddy_block_is_free(mm->roots[i]))) kunit_fail_current_test("buddy_fini() root"); - drm_block_free(mm, mm->roots[i]); + gpu_block_free(mm, mm->roots[i]); root_size = mm->chunk_size << order; size -= root_size; @@ -423,31 +413,31 @@ void drm_buddy_fini(struct drm_buddy *mm) kfree(mm->free_trees); 
kfree(mm->roots); } -EXPORT_SYMBOL(drm_buddy_fini); +EXPORT_SYMBOL(gpu_buddy_fini); -static int split_block(struct drm_buddy *mm, - struct drm_buddy_block *block) +static int split_block(struct gpu_buddy *mm, + struct gpu_buddy_block *block) { - unsigned int block_order = drm_buddy_block_order(block) - 1; - u64 offset = drm_buddy_block_offset(block); + unsigned int block_order = gpu_buddy_block_order(block) - 1; + u64 offset = gpu_buddy_block_offset(block); - BUG_ON(!drm_buddy_block_is_free(block)); - BUG_ON(!drm_buddy_block_order(block)); + BUG_ON(!gpu_buddy_block_is_free(block)); + BUG_ON(!gpu_buddy_block_order(block)); - block->left = drm_block_alloc(mm, block, block_order, offset); + block->left = gpu_block_alloc(mm, block, block_order, offset); if (!block->left) return -ENOMEM; - block->right = drm_block_alloc(mm, block, block_order, + block->right = gpu_block_alloc(mm, block, block_order, offset + (mm->chunk_size << block_order)); if (!block->right) { - drm_block_free(mm, block->left); + gpu_block_free(mm, block->left); return -ENOMEM; } mark_split(mm, block); - if (drm_buddy_block_is_clear(block)) { + if (gpu_buddy_block_is_clear(block)) { mark_cleared(block->left); mark_cleared(block->right); clear_reset(block); @@ -460,34 +450,34 @@ static int split_block(struct drm_buddy *mm, } /** - * drm_get_buddy - get buddy address + * gpu_get_buddy - get buddy address * - * @block: DRM buddy block + * @block: GPU buddy block * * Returns the corresponding buddy block for @block, or NULL * if this is a root block and can't be merged further. * Requires some kind of locking to protect against * any concurrent allocate and free operations. 
*/ -struct drm_buddy_block * -drm_get_buddy(struct drm_buddy_block *block) +struct gpu_buddy_block * +gpu_get_buddy(struct gpu_buddy_block *block) { return __get_buddy(block); } -EXPORT_SYMBOL(drm_get_buddy); +EXPORT_SYMBOL(gpu_get_buddy); /** - * drm_buddy_reset_clear - reset blocks clear state + * gpu_buddy_reset_clear - reset blocks clear state * - * @mm: DRM buddy manager + * @mm: GPU buddy manager * @is_clear: blocks clear state * * Reset the clear state based on @is_clear value for each block * in the freetree. */ -void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) +void gpu_buddy_reset_clear(struct gpu_buddy *mm, bool is_clear) { - enum drm_buddy_free_tree src_tree, dst_tree; + enum gpu_buddy_free_tree src_tree, dst_tree; u64 root_size, size, start; unsigned int order; int i; @@ -495,60 +485,60 @@ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) size = mm->size; for (i = 0; i < mm->n_roots; ++i) { order = ilog2(size) - ilog2(mm->chunk_size); - start = drm_buddy_block_offset(mm->roots[i]); + start = gpu_buddy_block_offset(mm->roots[i]); __force_merge(mm, start, start + size, order); root_size = mm->chunk_size << order; size -= root_size; } - src_tree = is_clear ? DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE; - dst_tree = is_clear ? DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; + src_tree = is_clear ? GPU_BUDDY_DIRTY_TREE : GPU_BUDDY_CLEAR_TREE; + dst_tree = is_clear ? 
GPU_BUDDY_CLEAR_TREE : GPU_BUDDY_DIRTY_TREE; for (i = 0; i <= mm->max_order; ++i) { struct rb_root *root = &mm->free_trees[src_tree][i]; - struct drm_buddy_block *block, *tmp; + struct gpu_buddy_block *block, *tmp; rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { rbtree_remove(mm, block); if (is_clear) { mark_cleared(block); - mm->clear_avail += drm_buddy_block_size(mm, block); + mm->clear_avail += gpu_buddy_block_size(mm, block); } else { clear_reset(block); - mm->clear_avail -= drm_buddy_block_size(mm, block); + mm->clear_avail -= gpu_buddy_block_size(mm, block); } rbtree_insert(mm, block, dst_tree); } } } -EXPORT_SYMBOL(drm_buddy_reset_clear); +EXPORT_SYMBOL(gpu_buddy_reset_clear); /** - * drm_buddy_free_block - free a block + * gpu_buddy_free_block - free a block * - * @mm: DRM buddy manager + * @mm: GPU buddy manager * @block: block to be freed */ -void drm_buddy_free_block(struct drm_buddy *mm, - struct drm_buddy_block *block) +void gpu_buddy_free_block(struct gpu_buddy *mm, + struct gpu_buddy_block *block) { - BUG_ON(!drm_buddy_block_is_allocated(block)); - mm->avail += drm_buddy_block_size(mm, block); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail += drm_buddy_block_size(mm, block); + BUG_ON(!gpu_buddy_block_is_allocated(block)); + mm->avail += gpu_buddy_block_size(mm, block); + if (gpu_buddy_block_is_clear(block)) + mm->clear_avail += gpu_buddy_block_size(mm, block); - __drm_buddy_free(mm, block, false); + __gpu_buddy_free(mm, block, false); } -EXPORT_SYMBOL(drm_buddy_free_block); +EXPORT_SYMBOL(gpu_buddy_free_block); -static void __drm_buddy_free_list(struct drm_buddy *mm, +static void __gpu_buddy_free_list(struct gpu_buddy *mm, struct list_head *objects, bool mark_clear, bool mark_dirty) { - struct drm_buddy_block *block, *on; + struct gpu_buddy_block *block, *on; WARN_ON(mark_dirty && mark_clear); @@ -557,13 +547,13 @@ static void __drm_buddy_free_list(struct drm_buddy *mm, mark_cleared(block); else if (mark_dirty) 
clear_reset(block); - drm_buddy_free_block(mm, block); + gpu_buddy_free_block(mm, block); cond_resched(); } INIT_LIST_HEAD(objects); } -static void drm_buddy_free_list_internal(struct drm_buddy *mm, +static void gpu_buddy_free_list_internal(struct gpu_buddy *mm, struct list_head *objects) { /* @@ -571,43 +561,43 @@ static void drm_buddy_free_list_internal(struct drm_buddy *mm, * at this point. For example we might have just failed part of the * allocation. */ - __drm_buddy_free_list(mm, objects, false, false); + __gpu_buddy_free_list(mm, objects, false, false); } /** - * drm_buddy_free_list - free blocks + * gpu_buddy_free_list - free blocks * - * @mm: DRM buddy manager + * @mm: GPU buddy manager * @objects: input list head to free blocks - * @flags: optional flags like DRM_BUDDY_CLEARED + * @flags: optional flags like GPU_BUDDY_CLEARED */ -void drm_buddy_free_list(struct drm_buddy *mm, +void gpu_buddy_free_list(struct gpu_buddy *mm, struct list_head *objects, unsigned int flags) { - bool mark_clear = flags & DRM_BUDDY_CLEARED; + bool mark_clear = flags & GPU_BUDDY_CLEARED; - __drm_buddy_free_list(mm, objects, mark_clear, !mark_clear); + __gpu_buddy_free_list(mm, objects, mark_clear, !mark_clear); } -EXPORT_SYMBOL(drm_buddy_free_list); +EXPORT_SYMBOL(gpu_buddy_free_list); -static bool block_incompatible(struct drm_buddy_block *block, unsigned int flags) +static bool block_incompatible(struct gpu_buddy_block *block, unsigned int flags) { - bool needs_clear = flags & DRM_BUDDY_CLEAR_ALLOCATION; + bool needs_clear = flags & GPU_BUDDY_CLEAR_ALLOCATION; - return needs_clear != drm_buddy_block_is_clear(block); + return needs_clear != gpu_buddy_block_is_clear(block); } -static struct drm_buddy_block * -__alloc_range_bias(struct drm_buddy *mm, +static struct gpu_buddy_block * +__alloc_range_bias(struct gpu_buddy *mm, u64 start, u64 end, unsigned int order, unsigned long flags, bool fallback) { u64 req_size = mm->chunk_size << order; - struct drm_buddy_block *block; - 
struct drm_buddy_block *buddy; + struct gpu_buddy_block *block; + struct gpu_buddy_block *buddy; LIST_HEAD(dfs); int err; int i; @@ -622,23 +612,23 @@ __alloc_range_bias(struct drm_buddy *mm, u64 block_end; block = list_first_entry_or_null(&dfs, - struct drm_buddy_block, + struct gpu_buddy_block, tmp_link); if (!block) break; list_del(&block->tmp_link); - if (drm_buddy_block_order(block) < order) + if (gpu_buddy_block_order(block) < order) continue; - block_start = drm_buddy_block_offset(block); - block_end = block_start + drm_buddy_block_size(mm, block) - 1; + block_start = gpu_buddy_block_offset(block); + block_end = block_start + gpu_buddy_block_size(mm, block) - 1; if (!overlaps(start, end, block_start, block_end)) continue; - if (drm_buddy_block_is_allocated(block)) + if (gpu_buddy_block_is_allocated(block)) continue; if (block_start < start || block_end > end) { @@ -654,17 +644,17 @@ __alloc_range_bias(struct drm_buddy *mm, continue; if (contains(start, end, block_start, block_end) && - order == drm_buddy_block_order(block)) { + order == gpu_buddy_block_order(block)) { /* * Find the free block within the range. 
*/ - if (drm_buddy_block_is_free(block)) + if (gpu_buddy_block_is_free(block)) return block; continue; } - if (!drm_buddy_block_is_split(block)) { + if (!gpu_buddy_block_is_split(block)) { err = split_block(mm, block); if (unlikely(err)) goto err_undo; @@ -684,19 +674,19 @@ __alloc_range_bias(struct drm_buddy *mm, */ buddy = __get_buddy(block); if (buddy && - (drm_buddy_block_is_free(block) && - drm_buddy_block_is_free(buddy))) - __drm_buddy_free(mm, block, false); + (gpu_buddy_block_is_free(block) && + gpu_buddy_block_is_free(buddy))) + __gpu_buddy_free(mm, block, false); return ERR_PTR(err); } -static struct drm_buddy_block * -__drm_buddy_alloc_range_bias(struct drm_buddy *mm, +static struct gpu_buddy_block * +__gpu_buddy_alloc_range_bias(struct gpu_buddy *mm, u64 start, u64 end, unsigned int order, unsigned long flags) { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; bool fallback = false; block = __alloc_range_bias(mm, start, end, order, @@ -708,12 +698,12 @@ __drm_buddy_alloc_range_bias(struct drm_buddy *mm, return block; } -static struct drm_buddy_block * -get_maxblock(struct drm_buddy *mm, +static struct gpu_buddy_block * +get_maxblock(struct gpu_buddy *mm, unsigned int order, - enum drm_buddy_free_tree tree) + enum gpu_buddy_free_tree tree) { - struct drm_buddy_block *max_block = NULL, *block = NULL; + struct gpu_buddy_block *max_block = NULL, *block = NULL; struct rb_root *root; unsigned int i; @@ -728,8 +718,8 @@ get_maxblock(struct drm_buddy *mm, continue; } - if (drm_buddy_block_offset(block) > - drm_buddy_block_offset(max_block)) { + if (gpu_buddy_block_offset(block) > + gpu_buddy_block_offset(max_block)) { max_block = block; } } @@ -737,25 +727,25 @@ get_maxblock(struct drm_buddy *mm, return max_block; } -static struct drm_buddy_block * -alloc_from_freetree(struct drm_buddy *mm, +static struct gpu_buddy_block * +alloc_from_freetree(struct gpu_buddy *mm, unsigned int order, unsigned long flags) { - struct drm_buddy_block *block = 
NULL; + struct gpu_buddy_block *block = NULL; struct rb_root *root; - enum drm_buddy_free_tree tree; + enum gpu_buddy_free_tree tree; unsigned int tmp; int err; - tree = (flags & DRM_BUDDY_CLEAR_ALLOCATION) ? - DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; + tree = (flags & GPU_BUDDY_CLEAR_ALLOCATION) ? + GPU_BUDDY_CLEAR_TREE : GPU_BUDDY_DIRTY_TREE; - if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) { + if (flags & GPU_BUDDY_TOPDOWN_ALLOCATION) { block = get_maxblock(mm, order, tree); if (block) /* Store the obtained block order */ - tmp = drm_buddy_block_order(block); + tmp = gpu_buddy_block_order(block); } else { for (tmp = order; tmp <= mm->max_order; ++tmp) { /* Get RB tree root for this order and tree */ @@ -768,8 +758,8 @@ alloc_from_freetree(struct drm_buddy *mm, if (!block) { /* Try allocating from the other tree */ - tree = (tree == DRM_BUDDY_CLEAR_TREE) ? - DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE; + tree = (tree == GPU_BUDDY_CLEAR_TREE) ? + GPU_BUDDY_DIRTY_TREE : GPU_BUDDY_CLEAR_TREE; for (tmp = order; tmp <= mm->max_order; ++tmp) { root = &mm->free_trees[tree][tmp]; @@ -782,7 +772,7 @@ alloc_from_freetree(struct drm_buddy *mm, return ERR_PTR(-ENOSPC); } - BUG_ON(!drm_buddy_block_is_free(block)); + BUG_ON(!gpu_buddy_block_is_free(block)); while (tmp != order) { err = split_block(mm, block); @@ -796,18 +786,18 @@ alloc_from_freetree(struct drm_buddy *mm, err_undo: if (tmp != order) - __drm_buddy_free(mm, block, false); + __gpu_buddy_free(mm, block, false); return ERR_PTR(err); } -static int __alloc_range(struct drm_buddy *mm, +static int __alloc_range(struct gpu_buddy *mm, struct list_head *dfs, u64 start, u64 size, struct list_head *blocks, u64 *total_allocated_on_err) { - struct drm_buddy_block *block; - struct drm_buddy_block *buddy; + struct gpu_buddy_block *block; + struct gpu_buddy_block *buddy; u64 total_allocated = 0; LIST_HEAD(allocated); u64 end; @@ -820,31 +810,31 @@ static int __alloc_range(struct drm_buddy *mm, u64 block_end; block = 
list_first_entry_or_null(dfs, - struct drm_buddy_block, + struct gpu_buddy_block, tmp_link); if (!block) break; list_del(&block->tmp_link); - block_start = drm_buddy_block_offset(block); - block_end = block_start + drm_buddy_block_size(mm, block) - 1; + block_start = gpu_buddy_block_offset(block); + block_end = block_start + gpu_buddy_block_size(mm, block) - 1; if (!overlaps(start, end, block_start, block_end)) continue; - if (drm_buddy_block_is_allocated(block)) { + if (gpu_buddy_block_is_allocated(block)) { err = -ENOSPC; goto err_free; } if (contains(start, end, block_start, block_end)) { - if (drm_buddy_block_is_free(block)) { + if (gpu_buddy_block_is_free(block)) { mark_allocated(mm, block); - total_allocated += drm_buddy_block_size(mm, block); - mm->avail -= drm_buddy_block_size(mm, block); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail -= drm_buddy_block_size(mm, block); + total_allocated += gpu_buddy_block_size(mm, block); + mm->avail -= gpu_buddy_block_size(mm, block); + if (gpu_buddy_block_is_clear(block)) + mm->clear_avail -= gpu_buddy_block_size(mm, block); list_add_tail(&block->link, &allocated); continue; } else if (!mm->clear_avail) { @@ -853,7 +843,7 @@ static int __alloc_range(struct drm_buddy *mm, } } - if (!drm_buddy_block_is_split(block)) { + if (!gpu_buddy_block_is_split(block)) { err = split_block(mm, block); if (unlikely(err)) goto err_undo; @@ -880,22 +870,22 @@ static int __alloc_range(struct drm_buddy *mm, */ buddy = __get_buddy(block); if (buddy && - (drm_buddy_block_is_free(block) && - drm_buddy_block_is_free(buddy))) - __drm_buddy_free(mm, block, false); + (gpu_buddy_block_is_free(block) && + gpu_buddy_block_is_free(buddy))) + __gpu_buddy_free(mm, block, false); err_free: if (err == -ENOSPC && total_allocated_on_err) { list_splice_tail(&allocated, blocks); *total_allocated_on_err = total_allocated; } else { - drm_buddy_free_list_internal(mm, &allocated); + gpu_buddy_free_list_internal(mm, &allocated); } return err; } -static 
int __drm_buddy_alloc_range(struct drm_buddy *mm, +static int __gpu_buddy_alloc_range(struct gpu_buddy *mm, u64 start, u64 size, u64 *total_allocated_on_err, @@ -911,13 +901,13 @@ static int __drm_buddy_alloc_range(struct drm_buddy *mm, blocks, total_allocated_on_err); } -static int __alloc_contig_try_harder(struct drm_buddy *mm, +static int __alloc_contig_try_harder(struct gpu_buddy *mm, u64 size, u64 min_block_size, struct list_head *blocks) { u64 rhs_offset, lhs_offset, lhs_size, filled; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; unsigned int tree, order; LIST_HEAD(blocks_lhs); unsigned long pages; @@ -943,8 +933,8 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm, block = rbtree_get_free_block(iter); /* Allocate blocks traversing RHS */ - rhs_offset = drm_buddy_block_offset(block); - err = __drm_buddy_alloc_range(mm, rhs_offset, size, + rhs_offset = gpu_buddy_block_offset(block); + err = __gpu_buddy_alloc_range(mm, rhs_offset, size, &filled, blocks); if (!err || err != -ENOSPC) return err; @@ -954,18 +944,18 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm, lhs_size = round_up(lhs_size, min_block_size); /* Allocate blocks traversing LHS */ - lhs_offset = drm_buddy_block_offset(block) - lhs_size; - err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size, + lhs_offset = gpu_buddy_block_offset(block) - lhs_size; + err = __gpu_buddy_alloc_range(mm, lhs_offset, lhs_size, NULL, &blocks_lhs); if (!err) { list_splice(&blocks_lhs, blocks); return 0; } else if (err != -ENOSPC) { - drm_buddy_free_list_internal(mm, blocks); + gpu_buddy_free_list_internal(mm, blocks); return err; } /* Free blocks for the next iteration */ - drm_buddy_free_list_internal(mm, blocks); + gpu_buddy_free_list_internal(mm, blocks); iter = rb_prev(iter); } @@ -975,9 +965,9 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm, } /** - * drm_buddy_block_trim - free unused pages + * gpu_buddy_block_trim - free unused pages * - * @mm: DRM buddy 
manager + * @mm: GPU buddy manager * @start: start address to begin the trimming. * @new_size: original size requested * @blocks: Input and output list of allocated blocks. @@ -993,13 +983,13 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm, * Returns: * 0 on success, error code on failure. */ -int drm_buddy_block_trim(struct drm_buddy *mm, +int gpu_buddy_block_trim(struct gpu_buddy *mm, u64 *start, u64 new_size, struct list_head *blocks) { - struct drm_buddy_block *parent; - struct drm_buddy_block *block; + struct gpu_buddy_block *parent; + struct gpu_buddy_block *block; u64 block_start, block_end; LIST_HEAD(dfs); u64 new_start; @@ -1009,22 +999,22 @@ int drm_buddy_block_trim(struct drm_buddy *mm, return -EINVAL; block = list_first_entry(blocks, - struct drm_buddy_block, + struct gpu_buddy_block, link); - block_start = drm_buddy_block_offset(block); - block_end = block_start + drm_buddy_block_size(mm, block); + block_start = gpu_buddy_block_offset(block); + block_end = block_start + gpu_buddy_block_size(mm, block); - if (WARN_ON(!drm_buddy_block_is_allocated(block))) + if (WARN_ON(!gpu_buddy_block_is_allocated(block))) return -EINVAL; - if (new_size > drm_buddy_block_size(mm, block)) + if (new_size > gpu_buddy_block_size(mm, block)) return -EINVAL; if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size)) return -EINVAL; - if (new_size == drm_buddy_block_size(mm, block)) + if (new_size == gpu_buddy_block_size(mm, block)) return 0; new_start = block_start; @@ -1043,9 +1033,9 @@ int drm_buddy_block_trim(struct drm_buddy *mm, list_del(&block->link); mark_free(mm, block); - mm->avail += drm_buddy_block_size(mm, block); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail += drm_buddy_block_size(mm, block); + mm->avail += gpu_buddy_block_size(mm, block); + if (gpu_buddy_block_is_clear(block)) + mm->clear_avail += gpu_buddy_block_size(mm, block); /* Prevent recursively freeing this node */ parent = block->parent; @@ -1055,26 +1045,26 @@ int 
drm_buddy_block_trim(struct drm_buddy *mm, err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL); if (err) { mark_allocated(mm, block); - mm->avail -= drm_buddy_block_size(mm, block); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail -= drm_buddy_block_size(mm, block); + mm->avail -= gpu_buddy_block_size(mm, block); + if (gpu_buddy_block_is_clear(block)) + mm->clear_avail -= gpu_buddy_block_size(mm, block); list_add(&block->link, blocks); } block->parent = parent; return err; } -EXPORT_SYMBOL(drm_buddy_block_trim); +EXPORT_SYMBOL(gpu_buddy_block_trim); -static struct drm_buddy_block * -__drm_buddy_alloc_blocks(struct drm_buddy *mm, +static struct gpu_buddy_block * +__gpu_buddy_alloc_blocks(struct gpu_buddy *mm, u64 start, u64 end, unsigned int order, unsigned long flags) { - if (flags & DRM_BUDDY_RANGE_ALLOCATION) + if (flags & GPU_BUDDY_RANGE_ALLOCATION) /* Allocate traversing within the range */ - return __drm_buddy_alloc_range_bias(mm, start, end, + return __gpu_buddy_alloc_range_bias(mm, start, end, order, flags); else /* Allocate from freetree */ @@ -1082,15 +1072,15 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm, } /** - * drm_buddy_alloc_blocks - allocate power-of-two blocks + * gpu_buddy_alloc_blocks - allocate power-of-two blocks * - * @mm: DRM buddy manager to allocate from + * @mm: GPU buddy manager to allocate from * @start: start of the allowed range for this block * @end: end of the allowed range for this block * @size: size of the allocation in bytes * @min_block_size: alignment of the allocation * @blocks: output list head to add allocated blocks - * @flags: DRM_BUDDY_*_ALLOCATION flags + * @flags: GPU_BUDDY_*_ALLOCATION flags * * alloc_range_bias() called on range limitations, which traverses * the tree and returns the desired block. @@ -1101,13 +1091,13 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm, * Returns: * 0 on success, error code on failure. 
*/ -int drm_buddy_alloc_blocks(struct drm_buddy *mm, +int gpu_buddy_alloc_blocks(struct gpu_buddy *mm, u64 start, u64 end, u64 size, u64 min_block_size, struct list_head *blocks, unsigned long flags) { - struct drm_buddy_block *block = NULL; + struct gpu_buddy_block *block = NULL; u64 original_size, original_min_size; unsigned int min_order, order; LIST_HEAD(allocated); @@ -1137,14 +1127,14 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, if (!IS_ALIGNED(start | end, min_block_size)) return -EINVAL; - return __drm_buddy_alloc_range(mm, start, size, NULL, blocks); + return __gpu_buddy_alloc_range(mm, start, size, NULL, blocks); } original_size = size; original_min_size = min_block_size; /* Roundup the size to power of 2 */ - if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) { + if (flags & GPU_BUDDY_CONTIGUOUS_ALLOCATION) { size = roundup_pow_of_two(size); min_block_size = size; /* Align size value to min_block_size */ @@ -1157,8 +1147,8 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, min_order = ilog2(min_block_size) - ilog2(mm->chunk_size); if (order > mm->max_order || size > mm->size) { - if ((flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) && - !(flags & DRM_BUDDY_RANGE_ALLOCATION)) + if ((flags & GPU_BUDDY_CONTIGUOUS_ALLOCATION) && + !(flags & GPU_BUDDY_RANGE_ALLOCATION)) return __alloc_contig_try_harder(mm, original_size, original_min_size, blocks); @@ -1171,7 +1161,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, BUG_ON(order < min_order); do { - block = __drm_buddy_alloc_blocks(mm, start, + block = __gpu_buddy_alloc_blocks(mm, start, end, order, flags); @@ -1182,7 +1172,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, /* Try allocation through force merge method */ if (mm->clear_avail && !__force_merge(mm, start, end, min_order)) { - block = __drm_buddy_alloc_blocks(mm, start, + block = __gpu_buddy_alloc_blocks(mm, start, end, min_order, flags); @@ -1196,8 +1186,8 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, * Try contiguous block allocation 
through * try harder method. */ - if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION && - !(flags & DRM_BUDDY_RANGE_ALLOCATION)) + if (flags & GPU_BUDDY_CONTIGUOUS_ALLOCATION && + !(flags & GPU_BUDDY_RANGE_ALLOCATION)) return __alloc_contig_try_harder(mm, original_size, original_min_size, @@ -1208,9 +1198,9 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, } while (1); mark_allocated(mm, block); - mm->avail -= drm_buddy_block_size(mm, block); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail -= drm_buddy_block_size(mm, block); + mm->avail -= gpu_buddy_block_size(mm, block); + if (gpu_buddy_block_is_clear(block)) + mm->clear_avail -= gpu_buddy_block_size(mm, block); kmemleak_update_trace(block); list_add_tail(&block->link, &allocated); @@ -1221,7 +1211,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, } while (1); /* Trim the allocated block to the required size */ - if (!(flags & DRM_BUDDY_TRIM_DISABLE) && + if (!(flags & GPU_BUDDY_TRIM_DISABLE) && original_size != size) { struct list_head *trim_list; LIST_HEAD(temp); @@ -1234,11 +1224,11 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, block = list_last_entry(&allocated, typeof(*block), link); list_move(&block->link, &temp); trim_list = &temp; - trim_size = drm_buddy_block_size(mm, block) - + trim_size = gpu_buddy_block_size(mm, block) - (size - original_size); } - drm_buddy_block_trim(mm, + gpu_buddy_block_trim(mm, NULL, trim_size, trim_list); @@ -1251,44 +1241,42 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, return 0; err_free: - drm_buddy_free_list_internal(mm, &allocated); + gpu_buddy_free_list_internal(mm, &allocated); return err; } -EXPORT_SYMBOL(drm_buddy_alloc_blocks); +EXPORT_SYMBOL(gpu_buddy_alloc_blocks); /** - * drm_buddy_block_print - print block information + * gpu_buddy_block_print - print block information * - * @mm: DRM buddy manager - * @block: DRM buddy block - * @p: DRM printer to use + * @mm: GPU buddy manager + * @block: GPU buddy block */ -void drm_buddy_block_print(struct 
drm_buddy *mm, - struct drm_buddy_block *block, - struct drm_printer *p) +void gpu_buddy_block_print(struct gpu_buddy *mm, + struct gpu_buddy_block *block) { - u64 start = drm_buddy_block_offset(block); - u64 size = drm_buddy_block_size(mm, block); + u64 start = gpu_buddy_block_offset(block); + u64 size = gpu_buddy_block_size(mm, block); - drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size); + pr_info("%#018llx-%#018llx: %llu\n", start, start + size, size); } -EXPORT_SYMBOL(drm_buddy_block_print); +EXPORT_SYMBOL(gpu_buddy_block_print); /** - * drm_buddy_print - print allocator state + * gpu_buddy_print - print allocator state * - * @mm: DRM buddy manager - * @p: DRM printer to use + * @mm: GPU buddy manager + * */ -void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p) +void gpu_buddy_print(struct gpu_buddy *mm) { int order; - drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n", - mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20); + pr_info("chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n", + mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20); for (order = mm->max_order; order >= 0; order--) { - struct drm_buddy_block *block, *tmp; + struct gpu_buddy_block *block, *tmp; struct rb_root *root; u64 count = 0, free; unsigned int tree; @@ -1297,40 +1285,38 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p) root = &mm->free_trees[tree][order]; rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { - BUG_ON(!drm_buddy_block_is_free(block)); + BUG_ON(!gpu_buddy_block_is_free(block)); count++; } } - drm_printf(p, "order-%2d ", order); - free = count * (mm->chunk_size << order); if (free < SZ_1M) - drm_printf(p, "free: %8llu KiB", free >> 10); + pr_info("order-%2d free: %8llu KiB, blocks: %llu\n", + order, free >> 10, count); else - drm_printf(p, "free: %8llu MiB", free >> 20); - - 
drm_printf(p, ", blocks: %llu\n", count); + pr_info("order-%2d free: %8llu MiB, blocks: %llu\n", + order, free >> 20, count); } } -EXPORT_SYMBOL(drm_buddy_print); +EXPORT_SYMBOL(gpu_buddy_print); -static void drm_buddy_module_exit(void) +static void gpu_buddy_module_exit(void) { kmem_cache_destroy(slab_blocks); } -static int __init drm_buddy_module_init(void) +static int __init gpu_buddy_module_init(void) { - slab_blocks = KMEM_CACHE(drm_buddy_block, 0); + slab_blocks = KMEM_CACHE(gpu_buddy_block, 0); if (!slab_blocks) return -ENOMEM; return 0; } -module_init(drm_buddy_module_init); -module_exit(drm_buddy_module_exit); +module_init(gpu_buddy_module_init); +module_exit(gpu_buddy_module_exit); -MODULE_DESCRIPTION("DRM Buddy Allocator"); +MODULE_DESCRIPTION("GPU Buddy Allocator"); MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 862ff4000969..758f2eb3d588 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -220,6 +220,7 @@ config DRM_GPUSVM config DRM_BUDDY tristate depends on DRM + select GPU_BUDDY help A page based buddy allocator diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 892859cfe95f..d0e37f8c2a46 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -114,7 +114,7 @@ drm_gpusvm_helper-$(CONFIG_ZONE_DEVICE) += \ obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o -obj-$(CONFIG_DRM_BUDDY) += ../buddy.o +obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o drm_dma_helper-y := drm_gem_dma_helper.o drm_dma_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_dma.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index f582113d78b7..149f8f942eae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -5663,7 +5663,7 @@ int amdgpu_ras_add_critical_region(struct amdgpu_device *adev, struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct amdgpu_vram_mgr_resource *vres; struct 
ras_critical_region *region; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; int ret = 0; if (!bo || !bo->tbo.resource) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h index be2e56ce1355..8908d9e08a30 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h @@ -55,7 +55,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res, uint64_t start, uint64_t size, struct amdgpu_res_cursor *cur) { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; struct list_head *head, *next; struct drm_mm_node *node; @@ -71,7 +71,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res, head = &to_amdgpu_vram_mgr_resource(res)->blocks; block = list_first_entry_or_null(head, - struct drm_buddy_block, + struct gpu_buddy_block, link); if (!block) goto fallback; @@ -81,7 +81,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res, next = block->link.next; if (next != head) - block = list_entry(next, struct drm_buddy_block, link); + block = list_entry(next, struct gpu_buddy_block, link); } cur->start = amdgpu_vram_mgr_block_start(block) + start; @@ -125,7 +125,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res, */ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size) { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; struct drm_mm_node *node; struct list_head *next; @@ -146,7 +146,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size) block = cur->node; next = block->link.next; - block = list_entry(next, struct drm_buddy_block, link); + block = list_entry(next, struct gpu_buddy_block, link); cur->node = block; cur->start = amdgpu_vram_mgr_block_start(block); @@ -175,7 +175,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size) */ static inline bool amdgpu_res_cleared(struct amdgpu_res_cursor *cur) 
{ - struct drm_buddy_block *block; + struct gpu_buddy_block *block; switch (cur->mem_type) { case TTM_PL_VRAM: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 9d934c07fa6b..cd94f6efb7cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "amdgpu.h" #include "amdgpu_vm.h" @@ -52,15 +53,15 @@ to_amdgpu_device(struct amdgpu_vram_mgr *mgr) return container_of(mgr, struct amdgpu_device, mman.vram_mgr); } -static inline struct drm_buddy_block * +static inline struct gpu_buddy_block * amdgpu_vram_mgr_first_block(struct list_head *list) { - return list_first_entry_or_null(list, struct drm_buddy_block, link); + return list_first_entry_or_null(list, struct gpu_buddy_block, link); } static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head) { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; u64 start, size; block = amdgpu_vram_mgr_first_block(head); @@ -71,7 +72,7 @@ static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head) start = amdgpu_vram_mgr_block_start(block); size = amdgpu_vram_mgr_block_size(block); - block = list_entry(block->link.next, struct drm_buddy_block, link); + block = list_entry(block->link.next, struct gpu_buddy_block, link); if (start + size != amdgpu_vram_mgr_block_start(block)) return false; } @@ -81,7 +82,7 @@ static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head) static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head) { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; u64 size = 0; list_for_each_entry(block, head, link) @@ -254,7 +255,7 @@ const struct attribute_group amdgpu_vram_mgr_attr_group = { * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM */ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, - struct 
drm_buddy_block *block) + struct gpu_buddy_block *block) { u64 start = amdgpu_vram_mgr_block_start(block); u64 end = start + amdgpu_vram_mgr_block_size(block); @@ -279,7 +280,7 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo) struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct ttm_resource *res = bo->tbo.resource; struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res); - struct drm_buddy_block *block; + struct gpu_buddy_block *block; u64 usage = 0; if (amdgpu_gmc_vram_full_visible(&adev->gmc)) @@ -299,15 +300,15 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = to_amdgpu_device(mgr); - struct drm_buddy *mm = &mgr->mm; + struct gpu_buddy *mm = &mgr->mm; struct amdgpu_vram_reservation *rsv, *temp; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; uint64_t vis_usage; list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) { - if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size, + if (gpu_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size, rsv->size, mm->chunk_size, &rsv->allocated, - DRM_BUDDY_RANGE_ALLOCATION)) + GPU_BUDDY_RANGE_ALLOCATION)) continue; block = amdgpu_vram_mgr_first_block(&rsv->allocated); @@ -403,7 +404,7 @@ int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr, uint64_t address, struct amdgpu_vram_block_info *info) { struct amdgpu_vram_mgr_resource *vres; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; u64 start, size; int ret = -ENOENT; @@ -450,8 +451,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, struct amdgpu_vram_mgr_resource *vres; u64 size, remaining_size, lpfn, fpfn; unsigned int adjust_dcc_size = 0; - struct drm_buddy *mm = &mgr->mm; - struct drm_buddy_block *block; + struct gpu_buddy *mm = &mgr->mm; + struct gpu_buddy_block *block; unsigned long pages_per_block; int r; @@ -493,17 
+494,17 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, INIT_LIST_HEAD(&vres->blocks); if (place->flags & TTM_PL_FLAG_TOPDOWN) - vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; + vres->flags |= GPU_BUDDY_TOPDOWN_ALLOCATION; if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) - vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION; + vres->flags |= GPU_BUDDY_CONTIGUOUS_ALLOCATION; if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED) - vres->flags |= DRM_BUDDY_CLEAR_ALLOCATION; + vres->flags |= GPU_BUDDY_CLEAR_ALLOCATION; if (fpfn || lpfn != mgr->mm.size) /* Allocate blocks in desired range */ - vres->flags |= DRM_BUDDY_RANGE_ALLOCATION; + vres->flags |= GPU_BUDDY_RANGE_ALLOCATION; if (bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC && adev->gmc.gmc_funcs->get_dcc_alignment) @@ -516,7 +517,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, dcc_size = roundup_pow_of_two(vres->base.size + adjust_dcc_size); remaining_size = (u64)dcc_size; - vres->flags |= DRM_BUDDY_TRIM_DISABLE; + vres->flags |= GPU_BUDDY_TRIM_DISABLE; } mutex_lock(&mgr->lock); @@ -536,7 +537,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, BUG_ON(min_block_size < mm->chunk_size); - r = drm_buddy_alloc_blocks(mm, fpfn, + r = gpu_buddy_alloc_blocks(mm, fpfn, lpfn, size, min_block_size, @@ -545,7 +546,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, if (unlikely(r == -ENOSPC) && pages_per_block == ~0ul && !(place->flags & TTM_PL_FLAG_CONTIGUOUS)) { - vres->flags &= ~DRM_BUDDY_CONTIGUOUS_ALLOCATION; + vres->flags &= ~GPU_BUDDY_CONTIGUOUS_ALLOCATION; pages_per_block = max_t(u32, 2UL << (20UL - PAGE_SHIFT), tbo->page_alignment); @@ -566,7 +567,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, list_add_tail(&vres->vres_node, &mgr->allocated_vres_list); if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) { - struct drm_buddy_block *dcc_block; + struct gpu_buddy_block *dcc_block; unsigned long dcc_start; u64 
trim_start; @@ -576,7 +577,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, roundup((unsigned long)amdgpu_vram_mgr_block_start(dcc_block), adjust_dcc_size); trim_start = (u64)dcc_start; - drm_buddy_block_trim(mm, &trim_start, + gpu_buddy_block_trim(mm, &trim_start, (u64)vres->base.size, &vres->blocks); } @@ -614,7 +615,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, return 0; error_free_blocks: - drm_buddy_free_list(mm, &vres->blocks, 0); + gpu_buddy_free_list(mm, &vres->blocks, 0); mutex_unlock(&mgr->lock); error_fini: ttm_resource_fini(man, &vres->base); @@ -637,8 +638,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res); struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = to_amdgpu_device(mgr); - struct drm_buddy *mm = &mgr->mm; - struct drm_buddy_block *block; + struct gpu_buddy *mm = &mgr->mm; + struct gpu_buddy_block *block; uint64_t vis_usage = 0; mutex_lock(&mgr->lock); @@ -649,7 +650,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, list_for_each_entry(block, &vres->blocks, link) vis_usage += amdgpu_vram_mgr_vis_size(adev, block); - drm_buddy_free_list(mm, &vres->blocks, vres->flags); + gpu_buddy_free_list(mm, &vres->blocks, vres->flags); amdgpu_vram_mgr_do_reserve(man); mutex_unlock(&mgr->lock); @@ -688,7 +689,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, if (!*sgt) return -ENOMEM; - /* Determine the number of DRM_BUDDY blocks to export */ + /* Determine the number of GPU_BUDDY blocks to export */ amdgpu_res_first(res, offset, length, &cursor); while (cursor.remaining) { num_entries++; @@ -704,10 +705,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, sg->length = 0; /* - * Walk down DRM_BUDDY blocks to populate scatterlist nodes - * @note: Use iterator api to get first the DRM_BUDDY block + * Walk down GPU_BUDDY blocks to populate scatterlist nodes 
+ * @note: Use iterator api to get first the GPU_BUDDY block * and the number of bytes from it. Access the following - * DRM_BUDDY block(s) if more buffer needs to exported + * GPU_BUDDY block(s) if more buffer needs to exported */ amdgpu_res_first(res, offset, length, &cursor); for_each_sgtable_sg((*sgt), sg, i) { @@ -792,10 +793,10 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr) void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev) { struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; - struct drm_buddy *mm = &mgr->mm; + struct gpu_buddy *mm = &mgr->mm; mutex_lock(&mgr->lock); - drm_buddy_reset_clear(mm, false); + gpu_buddy_reset_clear(mm, false); mutex_unlock(&mgr->lock); } @@ -815,7 +816,7 @@ static bool amdgpu_vram_mgr_intersects(struct ttm_resource_manager *man, size_t size) { struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res); - struct drm_buddy_block *block; + struct gpu_buddy_block *block; /* Check each drm buddy block individually */ list_for_each_entry(block, &mgr->blocks, link) { @@ -848,7 +849,7 @@ static bool amdgpu_vram_mgr_compatible(struct ttm_resource_manager *man, size_t size) { struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res); - struct drm_buddy_block *block; + struct gpu_buddy_block *block; /* Check each drm buddy block individually */ list_for_each_entry(block, &mgr->blocks, link) { @@ -877,7 +878,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); - struct drm_buddy *mm = &mgr->mm; + struct gpu_buddy *mm = &mgr->mm; struct amdgpu_vram_reservation *rsv; drm_printf(printer, " vis usage:%llu\n", @@ -930,7 +931,7 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) mgr->default_page_size = PAGE_SIZE; man->func = &amdgpu_vram_mgr_func; - err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE); + err = gpu_buddy_init(&mgr->mm, man->size, PAGE_SIZE); if (err) return err; @@ 
-965,11 +966,11 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) kfree(rsv); list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) { - drm_buddy_free_list(&mgr->mm, &rsv->allocated, 0); + gpu_buddy_free_list(&mgr->mm, &rsv->allocated, 0); kfree(rsv); } if (!adev->gmc.is_app_apu) - drm_buddy_fini(&mgr->mm); + gpu_buddy_fini(&mgr->mm); mutex_unlock(&mgr->lock); ttm_resource_manager_cleanup(man); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h index 874779618056..429a21a2e9b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h @@ -28,7 +28,7 @@ struct amdgpu_vram_mgr { struct ttm_resource_manager manager; - struct drm_buddy mm; + struct gpu_buddy mm; /* protects access to buffer objects */ struct mutex lock; struct list_head reservations_pending; @@ -57,19 +57,19 @@ struct amdgpu_vram_mgr_resource { struct amdgpu_vres_task task; }; -static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block) +static inline u64 amdgpu_vram_mgr_block_start(struct gpu_buddy_block *block) { - return drm_buddy_block_offset(block); + return gpu_buddy_block_offset(block); } -static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block) +static inline u64 amdgpu_vram_mgr_block_size(struct gpu_buddy_block *block) { - return (u64)PAGE_SIZE << drm_buddy_block_order(block); + return (u64)PAGE_SIZE << gpu_buddy_block_order(block); } -static inline bool amdgpu_vram_mgr_is_cleared(struct drm_buddy_block *block) +static inline bool amdgpu_vram_mgr_is_cleared(struct gpu_buddy_block *block) { - return drm_buddy_block_is_clear(block); + return gpu_buddy_block_is_clear(block); } static inline struct amdgpu_vram_mgr_resource * @@ -82,8 +82,8 @@ static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res) { struct amdgpu_vram_mgr_resource *ares = to_amdgpu_vram_mgr_resource(res); - WARN_ON(ares->flags & DRM_BUDDY_CLEARED); - 
ares->flags |= DRM_BUDDY_CLEARED; + WARN_ON(ares->flags & GPU_BUDDY_CLEARED); + ares->flags |= GPU_BUDDY_CLEARED; } int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr, diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c new file mode 100644 index 000000000000..841f3de5f307 --- /dev/null +++ b/drivers/gpu/drm/drm_buddy.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include + +#include +#include +#include +#include + +#include +#include +#include + +/** + * drm_buddy_block_print - print block information + * + * @mm: DRM buddy manager + * @block: DRM buddy block + * @p: DRM printer to use + */ +void drm_buddy_block_print(struct gpu_buddy *mm, + struct gpu_buddy_block *block, + struct drm_printer *p) +{ + u64 start = gpu_buddy_block_offset(block); + u64 size = gpu_buddy_block_size(mm, block); + + drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size); +} +EXPORT_SYMBOL(drm_buddy_block_print); + +/** + * drm_buddy_print - print allocator state + * + * @mm: DRM buddy manager + * @p: DRM printer to use + */ +void drm_buddy_print(struct gpu_buddy *mm, struct drm_printer *p) +{ + int order; + + drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n", + mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20); + + for (order = mm->max_order; order >= 0; order--) { + struct gpu_buddy_block *block, *tmp; + struct rb_root *root; + u64 count = 0, free; + unsigned int tree; + + for_each_free_tree(tree) { + root = &mm->free_trees[tree][order]; + + rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { + BUG_ON(!gpu_buddy_block_is_free(block)); + count++; + } + } + + drm_printf(p, "order-%2d ", order); + + free = count * (mm->chunk_size << order); + if (free < SZ_1M) + drm_printf(p, "free: %8llu KiB", free >> 10); + else + drm_printf(p, "free: %8llu MiB", free >> 20); + + drm_printf(p, ", blocks: %llu\n", 
count); + } +} +EXPORT_SYMBOL(drm_buddy_print); + +MODULE_DESCRIPTION("DRM-specific GPU Buddy Allocator Print Helpers"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c index 30246f02bcfe..6a34dae13769 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.c +++ b/drivers/gpu/drm/i915/i915_scatterlist.c @@ -167,9 +167,9 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res, struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); const u64 size = res->size; const u32 max_segment = round_down(UINT_MAX, page_alignment); - struct drm_buddy *mm = bman_res->mm; + struct gpu_buddy *mm = bman_res->mm; struct list_head *blocks = &bman_res->blocks; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; struct i915_refct_sgt *rsgt; struct scatterlist *sg; struct sg_table *st; @@ -202,8 +202,8 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res, list_for_each_entry(block, blocks, link) { u64 block_size, offset; - block_size = min_t(u64, size, drm_buddy_block_size(mm, block)); - offset = drm_buddy_block_offset(block); + block_size = min_t(u64, size, gpu_buddy_block_size(mm, block)); + offset = gpu_buddy_block_offset(block); while (block_size) { u64 len; diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c index 6b256d95badd..c5ca90088705 100644 --- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c +++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -16,7 +17,7 @@ struct i915_ttm_buddy_manager { struct ttm_resource_manager manager; - struct drm_buddy mm; + struct gpu_buddy mm; struct list_head reserved; struct mutex lock; unsigned long visible_size; @@ -38,7 +39,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man, { struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 
struct i915_ttm_buddy_resource *bman_res; - struct drm_buddy *mm = &bman->mm; + struct gpu_buddy *mm = &bman->mm; unsigned long n_pages, lpfn; u64 min_page_size; u64 size; @@ -57,13 +58,13 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man, bman_res->mm = mm; if (place->flags & TTM_PL_FLAG_TOPDOWN) - bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; + bman_res->flags |= GPU_BUDDY_TOPDOWN_ALLOCATION; if (place->flags & TTM_PL_FLAG_CONTIGUOUS) - bman_res->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION; + bman_res->flags |= GPU_BUDDY_CONTIGUOUS_ALLOCATION; if (place->fpfn || lpfn != man->size) - bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION; + bman_res->flags |= GPU_BUDDY_RANGE_ALLOCATION; GEM_BUG_ON(!bman_res->base.size); size = bman_res->base.size; @@ -89,7 +90,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man, goto err_free_res; } - err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT, + err = gpu_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT, (u64)lpfn << PAGE_SHIFT, (u64)n_pages << PAGE_SHIFT, min_page_size, @@ -101,15 +102,15 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man, if (lpfn <= bman->visible_size) { bman_res->used_visible_size = PFN_UP(bman_res->base.size); } else { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; list_for_each_entry(block, &bman_res->blocks, link) { unsigned long start = - drm_buddy_block_offset(block) >> PAGE_SHIFT; + gpu_buddy_block_offset(block) >> PAGE_SHIFT; if (start < bman->visible_size) { unsigned long end = start + - (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); + (gpu_buddy_block_size(mm, block) >> PAGE_SHIFT); bman_res->used_visible_size += min(end, bman->visible_size) - start; @@ -126,7 +127,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man, return 0; err_free_blocks: - drm_buddy_free_list(mm, &bman_res->blocks, 0); + gpu_buddy_free_list(mm, &bman_res->blocks, 0); mutex_unlock(&bman->lock); 
err_free_res: ttm_resource_fini(man, &bman_res->base); @@ -141,7 +142,7 @@ static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man, struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); mutex_lock(&bman->lock); - drm_buddy_free_list(&bman->mm, &bman_res->blocks, 0); + gpu_buddy_free_list(&bman->mm, &bman_res->blocks, 0); bman->visible_avail += bman_res->used_visible_size; mutex_unlock(&bman->lock); @@ -156,8 +157,8 @@ static bool i915_ttm_buddy_man_intersects(struct ttm_resource_manager *man, { struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); - struct drm_buddy *mm = &bman->mm; - struct drm_buddy_block *block; + struct gpu_buddy *mm = &bman->mm; + struct gpu_buddy_block *block; if (!place->fpfn && !place->lpfn) return true; @@ -176,9 +177,9 @@ static bool i915_ttm_buddy_man_intersects(struct ttm_resource_manager *man, /* Check each drm buddy block individually */ list_for_each_entry(block, &bman_res->blocks, link) { unsigned long fpfn = - drm_buddy_block_offset(block) >> PAGE_SHIFT; + gpu_buddy_block_offset(block) >> PAGE_SHIFT; unsigned long lpfn = fpfn + - (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); + (gpu_buddy_block_size(mm, block) >> PAGE_SHIFT); if (place->fpfn < lpfn && place->lpfn > fpfn) return true; @@ -194,8 +195,8 @@ static bool i915_ttm_buddy_man_compatible(struct ttm_resource_manager *man, { struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); - struct drm_buddy *mm = &bman->mm; - struct drm_buddy_block *block; + struct gpu_buddy *mm = &bman->mm; + struct gpu_buddy_block *block; if (!place->fpfn && !place->lpfn) return true; @@ -209,9 +210,9 @@ static bool i915_ttm_buddy_man_compatible(struct ttm_resource_manager *man, /* Check each drm buddy block individually */ list_for_each_entry(block, &bman_res->blocks, link) { unsigned long fpfn = - 
drm_buddy_block_offset(block) >> PAGE_SHIFT; + gpu_buddy_block_offset(block) >> PAGE_SHIFT; unsigned long lpfn = fpfn + - (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); + (gpu_buddy_block_size(mm, block) >> PAGE_SHIFT); if (fpfn < place->fpfn || lpfn > place->lpfn) return false; @@ -224,7 +225,7 @@ static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); - struct drm_buddy_block *block; + struct gpu_buddy_block *block; mutex_lock(&bman->lock); drm_printf(printer, "default_page_size: %lluKiB\n", @@ -293,7 +294,7 @@ int i915_ttm_buddy_man_init(struct ttm_device *bdev, if (!bman) return -ENOMEM; - err = drm_buddy_init(&bman->mm, size, chunk_size); + err = gpu_buddy_init(&bman->mm, size, chunk_size); if (err) goto err_free_bman; @@ -333,7 +334,7 @@ int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type) { struct ttm_resource_manager *man = ttm_manager_type(bdev, type); struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); - struct drm_buddy *mm = &bman->mm; + struct gpu_buddy *mm = &bman->mm; int ret; ttm_resource_manager_set_used(man, false); @@ -345,8 +346,8 @@ int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type) ttm_set_driver_manager(bdev, type, NULL); mutex_lock(&bman->lock); - drm_buddy_free_list(mm, &bman->reserved, 0); - drm_buddy_fini(mm); + gpu_buddy_free_list(mm, &bman->reserved, 0); + gpu_buddy_fini(mm); bman->visible_avail += bman->visible_reserved; WARN_ON_ONCE(bman->visible_avail != bman->visible_size); mutex_unlock(&bman->lock); @@ -371,15 +372,15 @@ int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man, u64 start, u64 size) { struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); - struct drm_buddy *mm = &bman->mm; + struct gpu_buddy *mm = &bman->mm; unsigned long fpfn = start >> PAGE_SHIFT; unsigned long flags = 0; int ret; - flags |= DRM_BUDDY_RANGE_ALLOCATION; + flags |= 
GPU_BUDDY_RANGE_ALLOCATION; mutex_lock(&bman->lock); - ret = drm_buddy_alloc_blocks(mm, start, + ret = gpu_buddy_alloc_blocks(mm, start, start + size, size, mm->chunk_size, &bman->reserved, diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h index d64620712830..1cff018c1689 100644 --- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h +++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h @@ -13,7 +13,7 @@ struct ttm_device; struct ttm_resource_manager; -struct drm_buddy; +struct gpu_buddy; /** * struct i915_ttm_buddy_resource @@ -33,7 +33,7 @@ struct i915_ttm_buddy_resource { struct list_head blocks; unsigned long flags; unsigned long used_visible_size; - struct drm_buddy *mm; + struct gpu_buddy *mm; }; /** diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index 7b856b5090f9..8307390943a2 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -6,7 +6,7 @@ #include #include -#include +#include #include "../i915_selftest.h" @@ -371,7 +371,7 @@ static int igt_mock_splintered_region(void *arg) struct drm_i915_private *i915 = mem->i915; struct i915_ttm_buddy_resource *res; struct drm_i915_gem_object *obj; - struct drm_buddy *mm; + struct gpu_buddy *mm; unsigned int expected_order; LIST_HEAD(objects); u64 size; @@ -447,8 +447,8 @@ static int igt_mock_max_segment(void *arg) struct drm_i915_private *i915 = mem->i915; struct i915_ttm_buddy_resource *res; struct drm_i915_gem_object *obj; - struct drm_buddy_block *block; - struct drm_buddy *mm; + struct gpu_buddy_block *block; + struct gpu_buddy *mm; struct list_head *blocks; struct scatterlist *sg; I915_RND_STATE(prng); @@ -487,8 +487,8 @@ static int igt_mock_max_segment(void *arg) mm = res->mm; size = 0; list_for_each_entry(block, blocks, link) { - if (drm_buddy_block_size(mm, block) > size) - size = drm_buddy_block_size(mm, block); 
+ if (gpu_buddy_block_size(mm, block) > size) + size = gpu_buddy_block_size(mm, block); } if (size < max_segment) { pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n", @@ -527,14 +527,14 @@ static u64 igt_object_mappable_total(struct drm_i915_gem_object *obj) struct intel_memory_region *mr = obj->mm.region; struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(obj->mm.res); - struct drm_buddy *mm = bman_res->mm; - struct drm_buddy_block *block; + struct gpu_buddy *mm = bman_res->mm; + struct gpu_buddy_block *block; u64 total; total = 0; list_for_each_entry(block, &bman_res->blocks, link) { - u64 start = drm_buddy_block_offset(block); - u64 end = start + drm_buddy_block_size(mm, block); + u64 start = gpu_buddy_block_offset(block); + u64 end = start + gpu_buddy_block_size(mm, block); if (start < resource_size(&mr->io)) total += min_t(u64, end, resource_size(&mr->io)) - start; diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c index 6d95447a989d..e32f3c8d7b84 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c @@ -251,7 +251,7 @@ static void ttm_bo_validate_basic(struct kunit *test) NULL, &dummy_ttm_bo_destroy); KUNIT_EXPECT_EQ(test, err, 0); - snd_place = ttm_place_kunit_init(test, snd_mem, DRM_BUDDY_TOPDOWN_ALLOCATION); + snd_place = ttm_place_kunit_init(test, snd_mem, GPU_BUDDY_TOPDOWN_ALLOCATION); snd_placement = ttm_placement_kunit_init(test, snd_place, 1); err = ttm_bo_validate(bo, snd_placement, &ctx_val); @@ -263,7 +263,7 @@ static void ttm_bo_validate_basic(struct kunit *test) KUNIT_EXPECT_TRUE(test, ttm_tt_is_populated(bo->ttm)); KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem); KUNIT_EXPECT_EQ(test, bo->resource->placement, - DRM_BUDDY_TOPDOWN_ALLOCATION); + GPU_BUDDY_TOPDOWN_ALLOCATION); ttm_bo_fini(bo); ttm_mock_manager_fini(priv->ttm_dev, snd_mem); diff --git 
a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c index dd395229e388..294d56d9067e 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c +++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c @@ -31,7 +31,7 @@ static int ttm_mock_manager_alloc(struct ttm_resource_manager *man, { struct ttm_mock_manager *manager = to_mock_mgr(man); struct ttm_mock_resource *mock_res; - struct drm_buddy *mm = &manager->mm; + struct gpu_buddy *mm = &manager->mm; u64 lpfn, fpfn, alloc_size; int err; @@ -47,14 +47,14 @@ static int ttm_mock_manager_alloc(struct ttm_resource_manager *man, INIT_LIST_HEAD(&mock_res->blocks); if (place->flags & TTM_PL_FLAG_TOPDOWN) - mock_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; + mock_res->flags |= GPU_BUDDY_TOPDOWN_ALLOCATION; if (place->flags & TTM_PL_FLAG_CONTIGUOUS) - mock_res->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION; + mock_res->flags |= GPU_BUDDY_CONTIGUOUS_ALLOCATION; alloc_size = (uint64_t)mock_res->base.size; mutex_lock(&manager->lock); - err = drm_buddy_alloc_blocks(mm, fpfn, lpfn, alloc_size, + err = gpu_buddy_alloc_blocks(mm, fpfn, lpfn, alloc_size, manager->default_page_size, &mock_res->blocks, mock_res->flags); @@ -67,7 +67,7 @@ static int ttm_mock_manager_alloc(struct ttm_resource_manager *man, return 0; error_free_blocks: - drm_buddy_free_list(mm, &mock_res->blocks, 0); + gpu_buddy_free_list(mm, &mock_res->blocks, 0); ttm_resource_fini(man, &mock_res->base); mutex_unlock(&manager->lock); @@ -79,10 +79,10 @@ static void ttm_mock_manager_free(struct ttm_resource_manager *man, { struct ttm_mock_manager *manager = to_mock_mgr(man); struct ttm_mock_resource *mock_res = to_mock_mgr_resource(res); - struct drm_buddy *mm = &manager->mm; + struct gpu_buddy *mm = &manager->mm; mutex_lock(&manager->lock); - drm_buddy_free_list(mm, &mock_res->blocks, 0); + gpu_buddy_free_list(mm, &mock_res->blocks, 0); mutex_unlock(&manager->lock); ttm_resource_fini(man, res); @@ -106,7 +106,7 @@ int 
ttm_mock_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size) mutex_init(&manager->lock); - err = drm_buddy_init(&manager->mm, size, PAGE_SIZE); + err = gpu_buddy_init(&manager->mm, size, PAGE_SIZE); if (err) { kfree(manager); @@ -142,7 +142,7 @@ void ttm_mock_manager_fini(struct ttm_device *bdev, u32 mem_type) ttm_resource_manager_set_used(man, false); mutex_lock(&mock_man->lock); - drm_buddy_fini(&mock_man->mm); + gpu_buddy_fini(&mock_man->mm); mutex_unlock(&mock_man->lock); ttm_set_driver_manager(bdev, mem_type, NULL); diff --git a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h index 96ea8c9aae34..08710756fd8e 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h +++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h @@ -9,7 +9,7 @@ struct ttm_mock_manager { struct ttm_resource_manager man; - struct drm_buddy mm; + struct gpu_buddy mm; u64 default_page_size; /* protects allocations of mock buffer objects */ struct mutex lock; diff --git a/drivers/gpu/drm/xe/xe_res_cursor.h b/drivers/gpu/drm/xe/xe_res_cursor.h index 4e00008b7081..5f4ab08c0686 100644 --- a/drivers/gpu/drm/xe/xe_res_cursor.h +++ b/drivers/gpu/drm/xe/xe_res_cursor.h @@ -58,7 +58,7 @@ struct xe_res_cursor { /** @dma_addr: Current element in a struct drm_pagemap_addr array */ const struct drm_pagemap_addr *dma_addr; /** @mm: Buddy allocator for VRAM cursor */ - struct drm_buddy *mm; + struct gpu_buddy *mm; /** * @dma_start: DMA start address for the current segment. 
* This may be different to @dma_addr.addr since elements in @@ -69,7 +69,7 @@ struct xe_res_cursor { u64 dma_seg_size; }; -static struct drm_buddy *xe_res_get_buddy(struct ttm_resource *res) +static struct gpu_buddy *xe_res_get_buddy(struct ttm_resource *res) { struct ttm_resource_manager *mgr; @@ -104,30 +104,30 @@ static inline void xe_res_first(struct ttm_resource *res, case XE_PL_STOLEN: case XE_PL_VRAM0: case XE_PL_VRAM1: { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; struct list_head *head, *next; - struct drm_buddy *mm = xe_res_get_buddy(res); + struct gpu_buddy *mm = xe_res_get_buddy(res); head = &to_xe_ttm_vram_mgr_resource(res)->blocks; block = list_first_entry_or_null(head, - struct drm_buddy_block, + struct gpu_buddy_block, link); if (!block) goto fallback; - while (start >= drm_buddy_block_size(mm, block)) { - start -= drm_buddy_block_size(mm, block); + while (start >= gpu_buddy_block_size(mm, block)) { + start -= gpu_buddy_block_size(mm, block); next = block->link.next; if (next != head) - block = list_entry(next, struct drm_buddy_block, + block = list_entry(next, struct gpu_buddy_block, link); } cur->mm = mm; - cur->start = drm_buddy_block_offset(block) + start; - cur->size = min(drm_buddy_block_size(mm, block) - start, + cur->start = gpu_buddy_block_offset(block) + start; + cur->size = min(gpu_buddy_block_size(mm, block) - start, size); cur->remaining = size; cur->node = block; @@ -259,7 +259,7 @@ static inline void xe_res_first_dma(const struct drm_pagemap_addr *dma_addr, */ static inline void xe_res_next(struct xe_res_cursor *cur, u64 size) { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; struct list_head *next; u64 start; @@ -295,18 +295,18 @@ static inline void xe_res_next(struct xe_res_cursor *cur, u64 size) block = cur->node; next = block->link.next; - block = list_entry(next, struct drm_buddy_block, link); + block = list_entry(next, struct gpu_buddy_block, link); - while (start >= 
drm_buddy_block_size(cur->mm, block)) { - start -= drm_buddy_block_size(cur->mm, block); + while (start >= gpu_buddy_block_size(cur->mm, block)) { + start -= gpu_buddy_block_size(cur->mm, block); next = block->link.next; - block = list_entry(next, struct drm_buddy_block, link); + block = list_entry(next, struct gpu_buddy_block, link); } - cur->start = drm_buddy_block_offset(block) + start; - cur->size = min(drm_buddy_block_size(cur->mm, block) - start, + cur->start = gpu_buddy_block_offset(block) + start; + cur->size = min(gpu_buddy_block_size(cur->mm, block) - start, cur->remaining); cur->node = block; break; diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index 213f0334518a..cda3bf7e2418 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -747,7 +747,7 @@ static u64 block_offset_to_pfn(struct drm_pagemap *dpagemap, u64 offset) return PHYS_PFN(offset + xpagemap->hpa_base); } -static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram) +static struct gpu_buddy *vram_to_buddy(struct xe_vram_region *vram) { return &vram->ttm.mm; } @@ -758,17 +758,17 @@ static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocati struct xe_bo *bo = to_xe_bo(devmem_allocation); struct ttm_resource *res = bo->ttm.resource; struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; int j = 0; list_for_each_entry(block, blocks, link) { struct xe_vram_region *vr = block->private; - struct drm_buddy *buddy = vram_to_buddy(vr); + struct gpu_buddy *buddy = vram_to_buddy(vr); u64 block_pfn = block_offset_to_pfn(devmem_allocation->dpagemap, - drm_buddy_block_offset(block)); + gpu_buddy_block_offset(block)); int i; - for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i) + for (i = 0; i < gpu_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i) pfn[j++] = block_pfn + i; } @@ -1033,7 +1033,7 @@ static int 
xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap, struct dma_fence *pre_migrate_fence = NULL; struct xe_device *xe = vr->xe; struct device *dev = xe->drm.dev; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; struct xe_validation_ctx vctx; struct list_head *blocks; struct drm_exec exec; diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c index 6553a19f7cf2..d119217d566a 100644 --- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c +++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c @@ -6,6 +6,7 @@ #include #include +#include #include #include @@ -16,16 +17,16 @@ #include "xe_ttm_vram_mgr.h" #include "xe_vram_types.h" -static inline struct drm_buddy_block * +static inline struct gpu_buddy_block * xe_ttm_vram_mgr_first_block(struct list_head *list) { - return list_first_entry_or_null(list, struct drm_buddy_block, link); + return list_first_entry_or_null(list, struct gpu_buddy_block, link); } -static inline bool xe_is_vram_mgr_blocks_contiguous(struct drm_buddy *mm, +static inline bool xe_is_vram_mgr_blocks_contiguous(struct gpu_buddy *mm, struct list_head *head) { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; u64 start, size; block = xe_ttm_vram_mgr_first_block(head); @@ -33,12 +34,12 @@ static inline bool xe_is_vram_mgr_blocks_contiguous(struct drm_buddy *mm, return false; while (head != block->link.next) { - start = drm_buddy_block_offset(block); - size = drm_buddy_block_size(mm, block); + start = gpu_buddy_block_offset(block); + size = gpu_buddy_block_size(mm, block); - block = list_entry(block->link.next, struct drm_buddy_block, + block = list_entry(block->link.next, struct gpu_buddy_block, link); - if (start + size != drm_buddy_block_offset(block)) + if (start + size != gpu_buddy_block_offset(block)) return false; } @@ -52,7 +53,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man, { struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man); struct xe_ttm_vram_mgr_resource *vres; - struct 
drm_buddy *mm = &mgr->mm; + struct gpu_buddy *mm = &mgr->mm; u64 size, min_page_size; unsigned long lpfn; int err; @@ -79,10 +80,10 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man, INIT_LIST_HEAD(&vres->blocks); if (place->flags & TTM_PL_FLAG_TOPDOWN) - vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; + vres->flags |= GPU_BUDDY_TOPDOWN_ALLOCATION; if (place->fpfn || lpfn != man->size >> PAGE_SHIFT) - vres->flags |= DRM_BUDDY_RANGE_ALLOCATION; + vres->flags |= GPU_BUDDY_RANGE_ALLOCATION; if (WARN_ON(!vres->base.size)) { err = -EINVAL; @@ -118,27 +119,27 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man, lpfn = max_t(unsigned long, place->fpfn + (size >> PAGE_SHIFT), lpfn); } - err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT, + err = gpu_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT, (u64)lpfn << PAGE_SHIFT, size, min_page_size, &vres->blocks, vres->flags); if (err) goto error_unlock; if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { - if (!drm_buddy_block_trim(mm, NULL, vres->base.size, &vres->blocks)) + if (!gpu_buddy_block_trim(mm, NULL, vres->base.size, &vres->blocks)) size = vres->base.size; } if (lpfn <= mgr->visible_size >> PAGE_SHIFT) { vres->used_visible_size = size; } else { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; list_for_each_entry(block, &vres->blocks, link) { - u64 start = drm_buddy_block_offset(block); + u64 start = gpu_buddy_block_offset(block); if (start < mgr->visible_size) { - u64 end = start + drm_buddy_block_size(mm, block); + u64 end = start + gpu_buddy_block_size(mm, block); vres->used_visible_size += min(end, mgr->visible_size) - start; @@ -158,11 +159,11 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man, * the object. 
*/ if (vres->base.placement & TTM_PL_FLAG_CONTIGUOUS) { - struct drm_buddy_block *block = list_first_entry(&vres->blocks, + struct gpu_buddy_block *block = list_first_entry(&vres->blocks, typeof(*block), link); - vres->base.start = drm_buddy_block_offset(block) >> PAGE_SHIFT; + vres->base.start = gpu_buddy_block_offset(block) >> PAGE_SHIFT; } else { vres->base.start = XE_BO_INVALID_OFFSET; } @@ -184,10 +185,10 @@ static void xe_ttm_vram_mgr_del(struct ttm_resource_manager *man, struct xe_ttm_vram_mgr_resource *vres = to_xe_ttm_vram_mgr_resource(res); struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man); - struct drm_buddy *mm = &mgr->mm; + struct gpu_buddy *mm = &mgr->mm; mutex_lock(&mgr->lock); - drm_buddy_free_list(mm, &vres->blocks, 0); + gpu_buddy_free_list(mm, &vres->blocks, 0); mgr->visible_avail += vres->used_visible_size; mutex_unlock(&mgr->lock); @@ -200,7 +201,7 @@ static void xe_ttm_vram_mgr_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man); - struct drm_buddy *mm = &mgr->mm; + struct gpu_buddy *mm = &mgr->mm; mutex_lock(&mgr->lock); drm_printf(printer, "default_page_size: %lluKiB\n", @@ -223,8 +224,8 @@ static bool xe_ttm_vram_mgr_intersects(struct ttm_resource_manager *man, struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man); struct xe_ttm_vram_mgr_resource *vres = to_xe_ttm_vram_mgr_resource(res); - struct drm_buddy *mm = &mgr->mm; - struct drm_buddy_block *block; + struct gpu_buddy *mm = &mgr->mm; + struct gpu_buddy_block *block; if (!place->fpfn && !place->lpfn) return true; @@ -234,9 +235,9 @@ static bool xe_ttm_vram_mgr_intersects(struct ttm_resource_manager *man, list_for_each_entry(block, &vres->blocks, link) { unsigned long fpfn = - drm_buddy_block_offset(block) >> PAGE_SHIFT; + gpu_buddy_block_offset(block) >> PAGE_SHIFT; unsigned long lpfn = fpfn + - (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); + (gpu_buddy_block_size(mm, block) >> PAGE_SHIFT); if (place->fpfn < 
lpfn && place->lpfn > fpfn) return true; @@ -253,8 +254,8 @@ static bool xe_ttm_vram_mgr_compatible(struct ttm_resource_manager *man, struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man); struct xe_ttm_vram_mgr_resource *vres = to_xe_ttm_vram_mgr_resource(res); - struct drm_buddy *mm = &mgr->mm; - struct drm_buddy_block *block; + struct gpu_buddy *mm = &mgr->mm; + struct gpu_buddy_block *block; if (!place->fpfn && !place->lpfn) return true; @@ -264,9 +265,9 @@ static bool xe_ttm_vram_mgr_compatible(struct ttm_resource_manager *man, list_for_each_entry(block, &vres->blocks, link) { unsigned long fpfn = - drm_buddy_block_offset(block) >> PAGE_SHIFT; + gpu_buddy_block_offset(block) >> PAGE_SHIFT; unsigned long lpfn = fpfn + - (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); + (gpu_buddy_block_size(mm, block) >> PAGE_SHIFT); if (fpfn < place->fpfn || lpfn > place->lpfn) return false; @@ -296,7 +297,7 @@ static void xe_ttm_vram_mgr_fini(struct drm_device *dev, void *arg) WARN_ON_ONCE(mgr->visible_avail != mgr->visible_size); - drm_buddy_fini(&mgr->mm); + gpu_buddy_fini(&mgr->mm); ttm_resource_manager_cleanup(&mgr->manager); @@ -327,7 +328,7 @@ int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr, mgr->visible_avail = io_size; ttm_resource_manager_init(man, &xe->ttm, size); - err = drm_buddy_init(&mgr->mm, man->size, default_page_size); + err = gpu_buddy_init(&mgr->mm, man->size, default_page_size); if (err) return err; @@ -375,7 +376,7 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe, if (!*sgt) return -ENOMEM; - /* Determine the number of DRM_BUDDY blocks to export */ + /* Determine the number of GPU_BUDDY blocks to export */ xe_res_first(res, offset, length, &cursor); while (cursor.remaining) { num_entries++; @@ -392,10 +393,10 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe, sg->length = 0; /* - * Walk down DRM_BUDDY blocks to populate scatterlist nodes - * @note: Use iterator api to get first the DRM_BUDDY block + * Walk down 
GPU_BUDDY blocks to populate scatterlist nodes + * @note: Use iterator api to get first the GPU_BUDDY block * and the number of bytes from it. Access the following - * DRM_BUDDY block(s) if more buffer needs to exported + * GPU_BUDDY block(s) if more buffer needs to exported */ xe_res_first(res, offset, length, &cursor); for_each_sgtable_sg((*sgt), sg, i) { diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h index babeec5511d9..9106da056b49 100644 --- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h +++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h @@ -18,7 +18,7 @@ struct xe_ttm_vram_mgr { /** @manager: Base TTM resource manager */ struct ttm_resource_manager manager; /** @mm: DRM buddy allocator which manages the VRAM */ - struct drm_buddy mm; + struct gpu_buddy mm; /** @visible_size: Proped size of the CPU visible portion */ u64 visible_size; /** @visible_avail: CPU visible portion still unallocated */ diff --git a/drivers/gpu/tests/Makefile b/drivers/gpu/tests/Makefile index 8e7654e87d82..4183e6e2de45 100644 --- a/drivers/gpu/tests/Makefile +++ b/drivers/gpu/tests/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 gpu_buddy_tests-y = gpu_buddy_test.o gpu_random.o -obj-$(CONFIG_DRM_KUNIT_TEST) += gpu_buddy_tests.o +obj-$(CONFIG_GPU_BUDDY_KUNIT_TEST) += gpu_buddy_tests.o diff --git a/drivers/gpu/tests/gpu_buddy_test.c b/drivers/gpu/tests/gpu_buddy_test.c index b905932da990..450e71deed90 100644 --- a/drivers/gpu/tests/gpu_buddy_test.c +++ b/drivers/gpu/tests/gpu_buddy_test.c @@ -21,9 +21,9 @@ static inline u64 get_size(int order, u64 chunk_size) return (1 << order) * chunk_size; } -static void drm_test_buddy_fragmentation_performance(struct kunit *test) +static void gpu_test_buddy_fragmentation_performance(struct kunit *test) { - struct drm_buddy_block *block, *tmp; + struct gpu_buddy_block *block, *tmp; int num_blocks, i, ret, count = 0; LIST_HEAD(allocated_blocks); unsigned long elapsed_ms; @@ -32,7 +32,7 @@ 
static void drm_test_buddy_fragmentation_performance(struct kunit *test) LIST_HEAD(clear_list); LIST_HEAD(dirty_list); LIST_HEAD(free_list); - struct drm_buddy mm; + struct gpu_buddy mm; u64 mm_size = SZ_4G; ktime_t start, end; @@ -47,7 +47,7 @@ static void drm_test_buddy_fragmentation_performance(struct kunit *test) * quickly the allocator can satisfy larger, aligned requests from a pool of * highly fragmented space. */ - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K), "buddy_init failed\n"); num_blocks = mm_size / SZ_64K; @@ -55,7 +55,7 @@ static void drm_test_buddy_fragmentation_performance(struct kunit *test) start = ktime_get(); /* Allocate with maximum fragmentation - 8K blocks with 64K alignment */ for (i = 0; i < num_blocks; i++) - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K, &allocated_blocks, 0), "buddy_alloc hit an error size=%u\n", SZ_8K); @@ -68,21 +68,21 @@ static void drm_test_buddy_fragmentation_performance(struct kunit *test) } /* Free with different flags to ensure no coalescing */ - drm_buddy_free_list(&mm, &clear_list, DRM_BUDDY_CLEARED); - drm_buddy_free_list(&mm, &dirty_list, 0); + gpu_buddy_free_list(&mm, &clear_list, GPU_BUDDY_CLEARED); + gpu_buddy_free_list(&mm, &dirty_list, 0); for (i = 0; i < num_blocks; i++) - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_64K, SZ_64K, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_64K, SZ_64K, &test_blocks, 0), "buddy_alloc hit an error size=%u\n", SZ_64K); - drm_buddy_free_list(&mm, &test_blocks, 0); + gpu_buddy_free_list(&mm, &test_blocks, 0); end = ktime_get(); elapsed_ms = ktime_to_ms(ktime_sub(end, start)); kunit_info(test, "Fragmented allocation took %lu ms\n", elapsed_ms); - drm_buddy_fini(&mm); + gpu_buddy_fini(&mm); /* * Reverse free 
order under fragmentation @@ -96,13 +96,13 @@ static void drm_test_buddy_fragmentation_performance(struct kunit *test) * deallocation occurs in the opposite order of allocation, exposing the * cost difference between a linear freelist scan and an ordered tree lookup. */ - ret = drm_buddy_init(&mm, mm_size, SZ_4K); + ret = gpu_buddy_init(&mm, mm_size, SZ_4K); KUNIT_ASSERT_EQ(test, ret, 0); start = ktime_get(); /* Allocate maximum fragmentation */ for (i = 0; i < num_blocks; i++) - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K, &allocated_blocks, 0), "buddy_alloc hit an error size=%u\n", SZ_8K); @@ -111,28 +111,28 @@ static void drm_test_buddy_fragmentation_performance(struct kunit *test) list_move_tail(&block->link, &free_list); count++; } - drm_buddy_free_list(&mm, &free_list, DRM_BUDDY_CLEARED); + gpu_buddy_free_list(&mm, &free_list, GPU_BUDDY_CLEARED); list_for_each_entry_safe_reverse(block, tmp, &allocated_blocks, link) list_move(&block->link, &reverse_list); - drm_buddy_free_list(&mm, &reverse_list, DRM_BUDDY_CLEARED); + gpu_buddy_free_list(&mm, &reverse_list, GPU_BUDDY_CLEARED); end = ktime_get(); elapsed_ms = ktime_to_ms(ktime_sub(end, start)); kunit_info(test, "Reverse-ordered free took %lu ms\n", elapsed_ms); - drm_buddy_fini(&mm); + gpu_buddy_fini(&mm); } -static void drm_test_buddy_alloc_range_bias(struct kunit *test) +static void gpu_test_buddy_alloc_range_bias(struct kunit *test) { u32 mm_size, size, ps, bias_size, bias_start, bias_end, bias_rem; - DRM_RND_STATE(prng, random_seed); + GPU_RND_STATE(prng, random_seed); unsigned int i, count, *order; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; unsigned long flags; - struct drm_buddy mm; + struct gpu_buddy mm; LIST_HEAD(allocated); bias_size = SZ_1M; @@ -142,11 +142,11 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test) kunit_info(test, "mm_size=%u, 
ps=%u\n", mm_size, ps); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, ps), "buddy_init failed\n"); count = mm_size / bias_size; - order = drm_random_order(count, &prng); + order = gpu_random_order(count, &prng); KUNIT_EXPECT_TRUE(test, order); /* @@ -166,79 +166,79 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test) /* internal round_up too big */ KUNIT_ASSERT_TRUE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, bias_size + ps, bias_size, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", bias_start, bias_end, bias_size, bias_size); /* size too big */ KUNIT_ASSERT_TRUE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, bias_size + ps, ps, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n", bias_start, bias_end, bias_size + ps, ps); /* bias range too small for size */ KUNIT_ASSERT_TRUE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start + ps, + gpu_buddy_alloc_blocks(&mm, bias_start + ps, bias_end, bias_size, ps, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n", bias_start + ps, bias_end, bias_size, ps); /* bias misaligned */ KUNIT_ASSERT_TRUE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start + ps, + gpu_buddy_alloc_blocks(&mm, bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n", bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1); /* single big page */ KUNIT_ASSERT_FALSE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, 
bias_end, bias_size, bias_size, &tmp, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n", bias_start, bias_end, bias_size, bias_size); - drm_buddy_free_list(&mm, &tmp, 0); + gpu_buddy_free_list(&mm, &tmp, 0); /* single page with internal round_up */ KUNIT_ASSERT_FALSE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, ps, bias_size, &tmp, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", bias_start, bias_end, ps, bias_size); - drm_buddy_free_list(&mm, &tmp, 0); + gpu_buddy_free_list(&mm, &tmp, 0); /* random size within */ size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps); if (size) KUNIT_ASSERT_FALSE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, size, ps, &tmp, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", bias_start, bias_end, size, ps); bias_rem -= size; /* too big for current avail */ KUNIT_ASSERT_TRUE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, bias_rem + ps, ps, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n", bias_start, bias_end, bias_rem + ps, ps); @@ -248,10 +248,10 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test) size = max(size, ps); KUNIT_ASSERT_FALSE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, size, ps, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", bias_start, bias_end, size, ps); /* @@ -259,15 +259,15 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test) * unallocated, and ideally not always on the bias 
* boundaries. */ - drm_buddy_free_list(&mm, &tmp, 0); + gpu_buddy_free_list(&mm, &tmp, 0); } else { list_splice_tail(&tmp, &allocated); } } kfree(order); - drm_buddy_free_list(&mm, &allocated, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &allocated, 0); + gpu_buddy_fini(&mm); /* * Something more free-form. Idea is to pick a random starting bias @@ -278,7 +278,7 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test) * allocated nodes in the middle of the address space. */ - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, ps), "buddy_init failed\n"); bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps); @@ -290,10 +290,10 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test) u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps); KUNIT_ASSERT_FALSE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, size, ps, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", bias_start, bias_end, size, ps); bias_rem -= size; @@ -319,24 +319,24 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test) KUNIT_ASSERT_EQ(test, bias_start, 0); KUNIT_ASSERT_EQ(test, bias_end, mm_size); KUNIT_ASSERT_TRUE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, bias_end, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, ps, ps, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc passed with bias(%x-%x), size=%u\n", bias_start, bias_end, ps); - drm_buddy_free_list(&mm, &allocated, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &allocated, 0); + gpu_buddy_fini(&mm); /* - * Allocate cleared blocks in the bias range when the DRM buddy's clear avail is + * Allocate cleared blocks in the bias range when the GPU buddy's clear avail is * zero. 
This will validate the bias range allocation in scenarios like system boot * when no cleared blocks are available and exercise the fallback path too. The resulting * blocks should always be dirty. */ - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, ps), "buddy_init failed\n"); bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps); @@ -344,11 +344,11 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test) bias_end = max(bias_end, bias_start + ps); bias_rem = bias_end - bias_start; - flags = DRM_BUDDY_CLEAR_ALLOCATION | DRM_BUDDY_RANGE_ALLOCATION; + flags = GPU_BUDDY_CLEAR_ALLOCATION | GPU_BUDDY_RANGE_ALLOCATION; size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps); KUNIT_ASSERT_FALSE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, size, ps, &allocated, flags), @@ -356,27 +356,27 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test) bias_start, bias_end, size, ps); list_for_each_entry(block, &allocated, link) - KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false); + KUNIT_EXPECT_EQ(test, gpu_buddy_block_is_clear(block), false); - drm_buddy_free_list(&mm, &allocated, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &allocated, 0); + gpu_buddy_fini(&mm); } -static void drm_test_buddy_alloc_clear(struct kunit *test) +static void gpu_test_buddy_alloc_clear(struct kunit *test) { unsigned long n_pages, total, i = 0; const unsigned long ps = SZ_4K; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; const int max_order = 12; LIST_HEAD(allocated); - struct drm_buddy mm; + struct gpu_buddy mm; unsigned int order; u32 mm_size, size; LIST_HEAD(dirty); LIST_HEAD(clean); mm_size = SZ_4K << max_order; - KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); + KUNIT_EXPECT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps)); KUNIT_EXPECT_EQ(test, mm.max_order, max_order); @@ 
-389,11 +389,11 @@ static void drm_test_buddy_alloc_clear(struct kunit *test) * is indeed all dirty pages and vice versa. Free it all again, * keeping the dirty/clear status. */ - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 5 * ps, ps, &allocated, - DRM_BUDDY_TOPDOWN_ALLOCATION), + GPU_BUDDY_TOPDOWN_ALLOCATION), "buddy_alloc hit an error size=%lu\n", 5 * ps); - drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED); + gpu_buddy_free_list(&mm, &allocated, GPU_BUDDY_CLEARED); n_pages = 10; do { @@ -406,37 +406,37 @@ static void drm_test_buddy_alloc_clear(struct kunit *test) flags = 0; } else { list = &clean; - flags = DRM_BUDDY_CLEAR_ALLOCATION; + flags = GPU_BUDDY_CLEAR_ALLOCATION; } - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, ps, ps, list, flags), "buddy_alloc hit an error size=%lu\n", ps); } while (++i < n_pages); list_for_each_entry(block, &clean, link) - KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true); + KUNIT_EXPECT_EQ(test, gpu_buddy_block_is_clear(block), true); list_for_each_entry(block, &dirty, link) - KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false); + KUNIT_EXPECT_EQ(test, gpu_buddy_block_is_clear(block), false); - drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED); + gpu_buddy_free_list(&mm, &clean, GPU_BUDDY_CLEARED); /* * Trying to go over the clear limit for some allocation. * The allocation should never fail with reasonable page-size. 
*/ - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 10 * ps, ps, &clean, - DRM_BUDDY_CLEAR_ALLOCATION), + GPU_BUDDY_CLEAR_ALLOCATION), "buddy_alloc hit an error size=%lu\n", 10 * ps); - drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED); - drm_buddy_free_list(&mm, &dirty, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &clean, GPU_BUDDY_CLEARED); + gpu_buddy_free_list(&mm, &dirty, 0); + gpu_buddy_fini(&mm); - KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); + KUNIT_EXPECT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps)); /* * Create a new mm. Intentionally fragment the address space by creating @@ -458,34 +458,34 @@ static void drm_test_buddy_alloc_clear(struct kunit *test) else list = &clean; - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, ps, ps, list, 0), "buddy_alloc hit an error size=%lu\n", ps); } while (++i < n_pages); - drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED); - drm_buddy_free_list(&mm, &dirty, 0); + gpu_buddy_free_list(&mm, &clean, GPU_BUDDY_CLEARED); + gpu_buddy_free_list(&mm, &dirty, 0); order = 1; do { size = SZ_4K << order; - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, size, size, &allocated, - DRM_BUDDY_CLEAR_ALLOCATION), + GPU_BUDDY_CLEAR_ALLOCATION), "buddy_alloc hit an error size=%u\n", size); total = 0; list_for_each_entry(block, &allocated, link) { if (size != mm_size) - KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false); - total += drm_buddy_block_size(&mm, block); + KUNIT_EXPECT_EQ(test, gpu_buddy_block_is_clear(block), false); + total += gpu_buddy_block_size(&mm, block); } KUNIT_EXPECT_EQ(test, total, size); - drm_buddy_free_list(&mm, &allocated, 0); + gpu_buddy_free_list(&mm, &allocated, 0); } while (++order <= max_order); - 
drm_buddy_fini(&mm); + gpu_buddy_fini(&mm); /* * Create a new mm with a non power-of-two size. Allocate a random size from each @@ -494,44 +494,44 @@ static void drm_test_buddy_alloc_clear(struct kunit *test) */ mm_size = (SZ_4K << max_order) + (SZ_4K << (max_order - 2)); - KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); + KUNIT_EXPECT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps)); KUNIT_EXPECT_EQ(test, mm.max_order, max_order); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order, 4 * ps, ps, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc hit an error size=%lu\n", 4 * ps); - drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order, + gpu_buddy_free_list(&mm, &allocated, GPU_BUDDY_CLEARED); + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order, 2 * ps, ps, &allocated, - DRM_BUDDY_CLEAR_ALLOCATION), + GPU_BUDDY_CLEAR_ALLOCATION), "buddy_alloc hit an error size=%lu\n", 2 * ps); - drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, SZ_4K << max_order, mm_size, + gpu_buddy_free_list(&mm, &allocated, GPU_BUDDY_CLEARED); + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, SZ_4K << max_order, mm_size, ps, ps, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc hit an error size=%lu\n", ps); - drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &allocated, GPU_BUDDY_CLEARED); + gpu_buddy_fini(&mm); } -static void drm_test_buddy_alloc_contiguous(struct kunit *test) +static void gpu_test_buddy_alloc_contiguous(struct kunit *test) { const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K; unsigned long i, n_pages, total; - struct drm_buddy_block 
*block; - struct drm_buddy mm; + struct gpu_buddy_block *block; + struct gpu_buddy mm; LIST_HEAD(left); LIST_HEAD(middle); LIST_HEAD(right); LIST_HEAD(allocated); - KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); + KUNIT_EXPECT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps)); /* * Idea is to fragment the address space by alternating block * allocations between three different lists; one for left, middle and * right. We can then free a list to simulate fragmentation. In - * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION, + * particular we want to exercise the GPU_BUDDY_CONTIGUOUS_ALLOCATION, * including the try_harder path. */ @@ -548,66 +548,66 @@ static void drm_test_buddy_alloc_contiguous(struct kunit *test) else list = &right; KUNIT_ASSERT_FALSE_MSG(test, - drm_buddy_alloc_blocks(&mm, 0, mm_size, + gpu_buddy_alloc_blocks(&mm, 0, mm_size, ps, ps, list, 0), "buddy_alloc hit an error size=%lu\n", ps); } while (++i < n_pages); - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 3 * ps, ps, &allocated, - DRM_BUDDY_CONTIGUOUS_ALLOCATION), + GPU_BUDDY_CONTIGUOUS_ALLOCATION), "buddy_alloc didn't error size=%lu\n", 3 * ps); - drm_buddy_free_list(&mm, &middle, 0); - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + gpu_buddy_free_list(&mm, &middle, 0); + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 3 * ps, ps, &allocated, - DRM_BUDDY_CONTIGUOUS_ALLOCATION), + GPU_BUDDY_CONTIGUOUS_ALLOCATION), "buddy_alloc didn't error size=%lu\n", 3 * ps); - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 2 * ps, ps, &allocated, - DRM_BUDDY_CONTIGUOUS_ALLOCATION), + GPU_BUDDY_CONTIGUOUS_ALLOCATION), "buddy_alloc didn't error size=%lu\n", 2 * ps); - drm_buddy_free_list(&mm, &right, 0); - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 
0, mm_size, + gpu_buddy_free_list(&mm, &right, 0); + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 3 * ps, ps, &allocated, - DRM_BUDDY_CONTIGUOUS_ALLOCATION), + GPU_BUDDY_CONTIGUOUS_ALLOCATION), "buddy_alloc didn't error size=%lu\n", 3 * ps); /* * At this point we should have enough contiguous space for 2 blocks, * however they are never buddies (since we freed middle and right) so * will require the try_harder logic to find them. */ - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 2 * ps, ps, &allocated, - DRM_BUDDY_CONTIGUOUS_ALLOCATION), + GPU_BUDDY_CONTIGUOUS_ALLOCATION), "buddy_alloc hit an error size=%lu\n", 2 * ps); - drm_buddy_free_list(&mm, &left, 0); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + gpu_buddy_free_list(&mm, &left, 0); + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 3 * ps, ps, &allocated, - DRM_BUDDY_CONTIGUOUS_ALLOCATION), + GPU_BUDDY_CONTIGUOUS_ALLOCATION), "buddy_alloc hit an error size=%lu\n", 3 * ps); total = 0; list_for_each_entry(block, &allocated, link) - total += drm_buddy_block_size(&mm, block); + total += gpu_buddy_block_size(&mm, block); KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3); - drm_buddy_free_list(&mm, &allocated, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &allocated, 0); + gpu_buddy_fini(&mm); } -static void drm_test_buddy_alloc_pathological(struct kunit *test) +static void gpu_test_buddy_alloc_pathological(struct kunit *test) { u64 mm_size, size, start = 0; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; const int max_order = 3; unsigned long flags = 0; int order, top; - struct drm_buddy mm; + struct gpu_buddy mm; LIST_HEAD(blocks); LIST_HEAD(holes); LIST_HEAD(tmp); @@ -620,7 +620,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test) */ mm_size = SZ_4K << max_order; - KUNIT_ASSERT_FALSE_MSG(test, 
drm_buddy_init(&mm, mm_size, SZ_4K), + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K), "buddy_init failed\n"); KUNIT_EXPECT_EQ(test, mm.max_order, max_order); @@ -630,18 +630,18 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test) block = list_first_entry_or_null(&blocks, typeof(*block), link); if (block) { list_del(&block->link); - drm_buddy_free_block(&mm, block); + gpu_buddy_free_block(&mm, block); } for (order = top; order--;) { size = get_size(order, mm.chunk_size); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc hit -ENOMEM with order=%d, top=%d\n", order, top); - block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link); + block = list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); list_move_tail(&block->link, &blocks); @@ -649,45 +649,45 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test) /* There should be one final page for this sub-allocation */ size = get_size(0, mm.chunk_size); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc hit -ENOMEM for hole\n"); - block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link); + block = list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); list_move_tail(&block->link, &holes); size = get_size(top, mm.chunk_size); - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!", top, max_order); } - drm_buddy_free_list(&mm, &holes, 0); + 
gpu_buddy_free_list(&mm, &holes, 0); /* Nothing larger than blocks of chunk_size now available */ for (order = 1; order <= max_order; order++) { size = get_size(order, mm.chunk_size); - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc unexpectedly succeeded at order %d, it should be full!", order); } list_splice_tail(&holes, &blocks); - drm_buddy_free_list(&mm, &blocks, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &blocks, 0); + gpu_buddy_fini(&mm); } -static void drm_test_buddy_alloc_pessimistic(struct kunit *test) +static void gpu_test_buddy_alloc_pessimistic(struct kunit *test) { u64 mm_size, size, start = 0; - struct drm_buddy_block *block, *bn; + struct gpu_buddy_block *block, *bn; const unsigned int max_order = 16; unsigned long flags = 0; - struct drm_buddy mm; + struct gpu_buddy mm; unsigned int order; LIST_HEAD(blocks); LIST_HEAD(tmp); @@ -699,19 +699,19 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test) */ mm_size = SZ_4K << max_order; - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K), "buddy_init failed\n"); KUNIT_EXPECT_EQ(test, mm.max_order, max_order); for (order = 0; order < max_order; order++) { size = get_size(order, mm.chunk_size); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc hit -ENOMEM with order=%d\n", order); - block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link); + block = list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); list_move_tail(&block->link, &blocks); @@ -719,11 +719,11 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test) /* And now the last 
remaining block available */ size = get_size(0, mm.chunk_size); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc hit -ENOMEM on final alloc\n"); - block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link); + block = list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); list_move_tail(&block->link, &blocks); @@ -731,58 +731,58 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test) /* Should be completely full! */ for (order = max_order; order--;) { size = get_size(order, mm.chunk_size); - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc unexpectedly succeeded, it should be full!"); } block = list_last_entry(&blocks, typeof(*block), link); list_del(&block->link); - drm_buddy_free_block(&mm, block); + gpu_buddy_free_block(&mm, block); /* As we free in increasing size, we make available larger blocks */ order = 1; list_for_each_entry_safe(block, bn, &blocks, link) { list_del(&block->link); - drm_buddy_free_block(&mm, block); + gpu_buddy_free_block(&mm, block); size = get_size(order, mm.chunk_size); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc hit -ENOMEM with order=%d\n", order); - block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link); + block = list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); list_del(&block->link); - drm_buddy_free_block(&mm, block); + gpu_buddy_free_block(&mm, block); order++; } /* To confirm, now the whole mm should be available */ size = 
get_size(max_order, mm.chunk_size); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc (realloc) hit -ENOMEM with order=%d\n", max_order); - block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link); + block = list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); list_del(&block->link); - drm_buddy_free_block(&mm, block); - drm_buddy_free_list(&mm, &blocks, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_block(&mm, block); + gpu_buddy_free_list(&mm, &blocks, 0); + gpu_buddy_fini(&mm); } -static void drm_test_buddy_alloc_optimistic(struct kunit *test) +static void gpu_test_buddy_alloc_optimistic(struct kunit *test) { u64 mm_size, size, start = 0; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; unsigned long flags = 0; const int max_order = 16; - struct drm_buddy mm; + struct gpu_buddy mm; LIST_HEAD(blocks); LIST_HEAD(tmp); int order; @@ -794,19 +794,19 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test) mm_size = SZ_4K * ((1 << (max_order + 1)) - 1); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K), "buddy_init failed\n"); KUNIT_EXPECT_EQ(test, mm.max_order, max_order); for (order = 0; order <= max_order; order++) { size = get_size(order, mm.chunk_size); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc hit -ENOMEM with order=%d\n", order); - block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link); + block = list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); list_move_tail(&block->link, &blocks); @@ 
-814,115 +814,115 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test) /* Should be completely full! */ size = get_size(0, mm.chunk_size); - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc unexpectedly succeeded, it should be full!"); - drm_buddy_free_list(&mm, &blocks, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &blocks, 0); + gpu_buddy_fini(&mm); } -static void drm_test_buddy_alloc_limit(struct kunit *test) +static void gpu_test_buddy_alloc_limit(struct kunit *test) { u64 size = U64_MAX, start = 0; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; unsigned long flags = 0; LIST_HEAD(allocated); - struct drm_buddy mm; + struct gpu_buddy mm; - KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K)); + KUNIT_EXPECT_FALSE(test, gpu_buddy_init(&mm, size, SZ_4K)); - KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER, + KUNIT_EXPECT_EQ_MSG(test, mm.max_order, GPU_BUDDY_MAX_ORDER, "mm.max_order(%d) != %d\n", mm.max_order, - DRM_BUDDY_MAX_ORDER); + GPU_BUDDY_MAX_ORDER); size = mm.chunk_size << mm.max_order; - KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size, + KUNIT_EXPECT_FALSE(test, gpu_buddy_alloc_blocks(&mm, start, size, size, mm.chunk_size, &allocated, flags)); - block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link); + block = list_first_entry_or_null(&allocated, struct gpu_buddy_block, link); KUNIT_EXPECT_TRUE(test, block); - KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order, + KUNIT_EXPECT_EQ_MSG(test, gpu_buddy_block_order(block), mm.max_order, "block order(%d) != %d\n", - drm_buddy_block_order(block), mm.max_order); + gpu_buddy_block_order(block), mm.max_order); - KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block), + KUNIT_EXPECT_EQ_MSG(test, gpu_buddy_block_size(&mm, block), BIT_ULL(mm.max_order) * 
mm.chunk_size, "block size(%llu) != %llu\n", - drm_buddy_block_size(&mm, block), + gpu_buddy_block_size(&mm, block), BIT_ULL(mm.max_order) * mm.chunk_size); - drm_buddy_free_list(&mm, &allocated, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &allocated, 0); + gpu_buddy_fini(&mm); } -static void drm_test_buddy_alloc_exceeds_max_order(struct kunit *test) +static void gpu_test_buddy_alloc_exceeds_max_order(struct kunit *test) { u64 mm_size = SZ_8G + SZ_2G, size = SZ_8G + SZ_1G, min_block_size = SZ_8G; - struct drm_buddy mm; + struct gpu_buddy mm; LIST_HEAD(blocks); int err; - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K), "buddy_init failed\n"); /* CONTIGUOUS allocation should succeed via try_harder fallback */ - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, size, SZ_4K, &blocks, - DRM_BUDDY_CONTIGUOUS_ALLOCATION), + GPU_BUDDY_CONTIGUOUS_ALLOCATION), "buddy_alloc hit an error size=%llu\n", size); - drm_buddy_free_list(&mm, &blocks, 0); + gpu_buddy_free_list(&mm, &blocks, 0); /* Non-CONTIGUOUS with large min_block_size should return -EINVAL */ - err = drm_buddy_alloc_blocks(&mm, 0, mm_size, size, min_block_size, &blocks, 0); + err = gpu_buddy_alloc_blocks(&mm, 0, mm_size, size, min_block_size, &blocks, 0); KUNIT_EXPECT_EQ(test, err, -EINVAL); /* Non-CONTIGUOUS + RANGE with large min_block_size should return -EINVAL */ - err = drm_buddy_alloc_blocks(&mm, 0, mm_size, size, min_block_size, &blocks, - DRM_BUDDY_RANGE_ALLOCATION); + err = gpu_buddy_alloc_blocks(&mm, 0, mm_size, size, min_block_size, &blocks, + GPU_BUDDY_RANGE_ALLOCATION); KUNIT_EXPECT_EQ(test, err, -EINVAL); /* CONTIGUOUS + RANGE should return -EINVAL (no try_harder for RANGE) */ - err = drm_buddy_alloc_blocks(&mm, 0, mm_size, size, SZ_4K, &blocks, - DRM_BUDDY_CONTIGUOUS_ALLOCATION | DRM_BUDDY_RANGE_ALLOCATION); + 
err = gpu_buddy_alloc_blocks(&mm, 0, mm_size, size, SZ_4K, &blocks, + GPU_BUDDY_CONTIGUOUS_ALLOCATION | GPU_BUDDY_RANGE_ALLOCATION); KUNIT_EXPECT_EQ(test, err, -EINVAL); - drm_buddy_fini(&mm); + gpu_buddy_fini(&mm); } -static int drm_buddy_suite_init(struct kunit_suite *suite) +static int gpu_buddy_suite_init(struct kunit_suite *suite) { while (!random_seed) random_seed = get_random_u32(); - kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n", + kunit_info(suite, "Testing GPU buddy manager, with random_seed=0x%x\n", random_seed); return 0; } -static struct kunit_case drm_buddy_tests[] = { - KUNIT_CASE(drm_test_buddy_alloc_limit), - KUNIT_CASE(drm_test_buddy_alloc_optimistic), - KUNIT_CASE(drm_test_buddy_alloc_pessimistic), - KUNIT_CASE(drm_test_buddy_alloc_pathological), - KUNIT_CASE(drm_test_buddy_alloc_contiguous), - KUNIT_CASE(drm_test_buddy_alloc_clear), - KUNIT_CASE(drm_test_buddy_alloc_range_bias), - KUNIT_CASE(drm_test_buddy_fragmentation_performance), - KUNIT_CASE(drm_test_buddy_alloc_exceeds_max_order), +static struct kunit_case gpu_buddy_tests[] = { + KUNIT_CASE(gpu_test_buddy_alloc_limit), + KUNIT_CASE(gpu_test_buddy_alloc_optimistic), + KUNIT_CASE(gpu_test_buddy_alloc_pessimistic), + KUNIT_CASE(gpu_test_buddy_alloc_pathological), + KUNIT_CASE(gpu_test_buddy_alloc_contiguous), + KUNIT_CASE(gpu_test_buddy_alloc_clear), + KUNIT_CASE(gpu_test_buddy_alloc_range_bias), + KUNIT_CASE(gpu_test_buddy_fragmentation_performance), + KUNIT_CASE(gpu_test_buddy_alloc_exceeds_max_order), {} }; -static struct kunit_suite drm_buddy_test_suite = { - .name = "drm_buddy", - .suite_init = drm_buddy_suite_init, - .test_cases = drm_buddy_tests, +static struct kunit_suite gpu_buddy_test_suite = { + .name = "gpu_buddy", + .suite_init = gpu_buddy_suite_init, + .test_cases = gpu_buddy_tests, }; -kunit_test_suite(drm_buddy_test_suite); +kunit_test_suite(gpu_buddy_test_suite); MODULE_AUTHOR("Intel Corporation"); -MODULE_DESCRIPTION("Kunit test for drm_buddy 
functions"); +MODULE_DESCRIPTION("Kunit test for gpu_buddy functions"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/tests/gpu_random.c b/drivers/gpu/tests/gpu_random.c index ddd1f594b5d5..6356372f7e52 100644 --- a/drivers/gpu/tests/gpu_random.c +++ b/drivers/gpu/tests/gpu_random.c @@ -8,26 +8,26 @@ #include "gpu_random.h" -u32 drm_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state) +u32 gpu_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state) { return upper_32_bits((u64)prandom_u32_state(state) * ep_ro); } -EXPORT_SYMBOL(drm_prandom_u32_max_state); +EXPORT_SYMBOL(gpu_prandom_u32_max_state); -void drm_random_reorder(unsigned int *order, unsigned int count, +void gpu_random_reorder(unsigned int *order, unsigned int count, struct rnd_state *state) { unsigned int i, j; for (i = 0; i < count; ++i) { BUILD_BUG_ON(sizeof(unsigned int) > sizeof(u32)); - j = drm_prandom_u32_max_state(count, state); + j = gpu_prandom_u32_max_state(count, state); swap(order[i], order[j]); } } -EXPORT_SYMBOL(drm_random_reorder); +EXPORT_SYMBOL(gpu_random_reorder); -unsigned int *drm_random_order(unsigned int count, struct rnd_state *state) +unsigned int *gpu_random_order(unsigned int count, struct rnd_state *state) { unsigned int *order, i; @@ -38,7 +38,7 @@ unsigned int *drm_random_order(unsigned int count, struct rnd_state *state) for (i = 0; i < count; i++) order[i] = i; - drm_random_reorder(order, count, state); + gpu_random_reorder(order, count, state); return order; } -EXPORT_SYMBOL(drm_random_order); +EXPORT_SYMBOL(gpu_random_order); diff --git a/drivers/gpu/tests/gpu_random.h b/drivers/gpu/tests/gpu_random.h index 9f827260a89d..b68cf3448264 100644 --- a/drivers/gpu/tests/gpu_random.h +++ b/drivers/gpu/tests/gpu_random.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __DRM_RANDOM_H__ -#define __DRM_RANDOM_H__ +#ifndef __GPU_RANDOM_H__ +#define __GPU_RANDOM_H__ /* This is a temporary home for a couple of utility functions that should * be transposed to 
lib/ at the earliest convenience. @@ -8,21 +8,21 @@ #include -#define DRM_RND_STATE_INITIALIZER(seed__) ({ \ +#define GPU_RND_STATE_INITIALIZER(seed__) ({ \ struct rnd_state state__; \ prandom_seed_state(&state__, (seed__)); \ state__; \ }) -#define DRM_RND_STATE(name__, seed__) \ - struct rnd_state name__ = DRM_RND_STATE_INITIALIZER(seed__) +#define GPU_RND_STATE(name__, seed__) \ + struct rnd_state name__ = GPU_RND_STATE_INITIALIZER(seed__) -unsigned int *drm_random_order(unsigned int count, +unsigned int *gpu_random_order(unsigned int count, struct rnd_state *state); -void drm_random_reorder(unsigned int *order, +void gpu_random_reorder(unsigned int *order, unsigned int count, struct rnd_state *state); -u32 drm_prandom_u32_max_state(u32 ep_ro, +u32 gpu_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state); -#endif /* !__DRM_RANDOM_H__ */ +#endif /* !__GPU_RANDOM_H__ */ diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index d51777df12d1..0adb1e2fa533 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -37,6 +37,7 @@ source "drivers/char/agp/Kconfig" source "drivers/gpu/vga/Kconfig" +source "drivers/gpu/Kconfig" source "drivers/gpu/host1x/Kconfig" source "drivers/gpu/ipu-v3/Kconfig" source "drivers/gpu/nova-core/Kconfig" diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h new file mode 100644 index 000000000000..3054369bebff --- /dev/null +++ b/include/drm/drm_buddy.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __DRM_BUDDY_H__ +#define __DRM_BUDDY_H__ + +#include + +struct drm_printer; + +/* DRM-specific GPU Buddy Allocator print helpers */ +void drm_buddy_print(struct gpu_buddy *mm, struct drm_printer *p); +void drm_buddy_block_print(struct gpu_buddy *mm, + struct gpu_buddy_block *block, + struct drm_printer *p); +#endif diff --git a/include/linux/gpu_buddy.h b/include/linux/gpu_buddy.h index b909fa8f810a..07ac65db6d2e 100644 --- a/include/linux/gpu_buddy.h 
+++ b/include/linux/gpu_buddy.h @@ -3,8 +3,8 @@ * Copyright © 2021 Intel Corporation */ -#ifndef __DRM_BUDDY_H__ -#define __DRM_BUDDY_H__ +#ifndef __GPU_BUDDY_H__ +#define __GPU_BUDDY_H__ #include #include @@ -12,38 +12,45 @@ #include #include -struct drm_printer; +#define GPU_BUDDY_RANGE_ALLOCATION BIT(0) +#define GPU_BUDDY_TOPDOWN_ALLOCATION BIT(1) +#define GPU_BUDDY_CONTIGUOUS_ALLOCATION BIT(2) +#define GPU_BUDDY_CLEAR_ALLOCATION BIT(3) +#define GPU_BUDDY_CLEARED BIT(4) +#define GPU_BUDDY_TRIM_DISABLE BIT(5) -#define DRM_BUDDY_RANGE_ALLOCATION BIT(0) -#define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1) -#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2) -#define DRM_BUDDY_CLEAR_ALLOCATION BIT(3) -#define DRM_BUDDY_CLEARED BIT(4) -#define DRM_BUDDY_TRIM_DISABLE BIT(5) +enum gpu_buddy_free_tree { + GPU_BUDDY_CLEAR_TREE = 0, + GPU_BUDDY_DIRTY_TREE, + GPU_BUDDY_MAX_FREE_TREES, +}; -struct drm_buddy_block { -#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12) -#define DRM_BUDDY_HEADER_STATE GENMASK_ULL(11, 10) -#define DRM_BUDDY_ALLOCATED (1 << 10) -#define DRM_BUDDY_FREE (2 << 10) -#define DRM_BUDDY_SPLIT (3 << 10) -#define DRM_BUDDY_HEADER_CLEAR GENMASK_ULL(9, 9) +#define for_each_free_tree(tree) \ + for ((tree) = 0; (tree) < GPU_BUDDY_MAX_FREE_TREES; (tree)++) + +struct gpu_buddy_block { +#define GPU_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12) +#define GPU_BUDDY_HEADER_STATE GENMASK_ULL(11, 10) +#define GPU_BUDDY_ALLOCATED (1 << 10) +#define GPU_BUDDY_FREE (2 << 10) +#define GPU_BUDDY_SPLIT (3 << 10) +#define GPU_BUDDY_HEADER_CLEAR GENMASK_ULL(9, 9) /* Free to be used, if needed in the future */ -#define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(8, 6) -#define DRM_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0) +#define GPU_BUDDY_HEADER_UNUSED GENMASK_ULL(8, 6) +#define GPU_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0) u64 header; - struct drm_buddy_block *left; - struct drm_buddy_block *right; - struct drm_buddy_block *parent; + struct gpu_buddy_block *left; + struct gpu_buddy_block *right; + struct 
gpu_buddy_block *parent; void *private; /* owned by creator */ /* - * While the block is allocated by the user through drm_buddy_alloc*, + * While the block is allocated by the user through gpu_buddy_alloc*, * the user has ownership of the link, for example to maintain within * a list, if so desired. As soon as the block is freed with - * drm_buddy_free* ownership is given back to the mm. + * gpu_buddy_free* ownership is given back to the mm. */ union { struct rb_node rb; @@ -54,15 +61,15 @@ struct drm_buddy_block { }; /* Order-zero must be at least SZ_4K */ -#define DRM_BUDDY_MAX_ORDER (63 - 12) +#define GPU_BUDDY_MAX_ORDER (63 - 12) /* * Binary Buddy System. * * Locking should be handled by the user, a simple mutex around - * drm_buddy_alloc* and drm_buddy_free* should suffice. + * gpu_buddy_alloc* and gpu_buddy_free* should suffice. */ -struct drm_buddy { +struct gpu_buddy { /* Maintain a free list for each order. */ struct rb_root **free_trees; @@ -73,7 +80,7 @@ struct drm_buddy { * block. Nodes are either allocated or free, in which case they will * also exist on the respective free list. 
*/ - struct drm_buddy_block **roots; + struct gpu_buddy_block **roots; /* * Anything from here is public, and remains static for the lifetime of @@ -90,82 +97,81 @@ struct drm_buddy { }; static inline u64 -drm_buddy_block_offset(const struct drm_buddy_block *block) +gpu_buddy_block_offset(const struct gpu_buddy_block *block) { - return block->header & DRM_BUDDY_HEADER_OFFSET; + return block->header & GPU_BUDDY_HEADER_OFFSET; } static inline unsigned int -drm_buddy_block_order(struct drm_buddy_block *block) +gpu_buddy_block_order(struct gpu_buddy_block *block) { - return block->header & DRM_BUDDY_HEADER_ORDER; + return block->header & GPU_BUDDY_HEADER_ORDER; } static inline unsigned int -drm_buddy_block_state(struct drm_buddy_block *block) +gpu_buddy_block_state(struct gpu_buddy_block *block) { - return block->header & DRM_BUDDY_HEADER_STATE; + return block->header & GPU_BUDDY_HEADER_STATE; } static inline bool -drm_buddy_block_is_allocated(struct drm_buddy_block *block) +gpu_buddy_block_is_allocated(struct gpu_buddy_block *block) { - return drm_buddy_block_state(block) == DRM_BUDDY_ALLOCATED; + return gpu_buddy_block_state(block) == GPU_BUDDY_ALLOCATED; } static inline bool -drm_buddy_block_is_clear(struct drm_buddy_block *block) +gpu_buddy_block_is_clear(struct gpu_buddy_block *block) { - return block->header & DRM_BUDDY_HEADER_CLEAR; + return block->header & GPU_BUDDY_HEADER_CLEAR; } static inline bool -drm_buddy_block_is_free(struct drm_buddy_block *block) +gpu_buddy_block_is_free(struct gpu_buddy_block *block) { - return drm_buddy_block_state(block) == DRM_BUDDY_FREE; + return gpu_buddy_block_state(block) == GPU_BUDDY_FREE; } static inline bool -drm_buddy_block_is_split(struct drm_buddy_block *block) +gpu_buddy_block_is_split(struct gpu_buddy_block *block) { - return drm_buddy_block_state(block) == DRM_BUDDY_SPLIT; + return gpu_buddy_block_state(block) == GPU_BUDDY_SPLIT; } static inline u64 -drm_buddy_block_size(struct drm_buddy *mm, - struct drm_buddy_block 
*block) +gpu_buddy_block_size(struct gpu_buddy *mm, + struct gpu_buddy_block *block) { - return mm->chunk_size << drm_buddy_block_order(block); + return mm->chunk_size << gpu_buddy_block_order(block); } -int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size); +int gpu_buddy_init(struct gpu_buddy *mm, u64 size, u64 chunk_size); -void drm_buddy_fini(struct drm_buddy *mm); +void gpu_buddy_fini(struct gpu_buddy *mm); -struct drm_buddy_block * -drm_get_buddy(struct drm_buddy_block *block); +struct gpu_buddy_block * +gpu_get_buddy(struct gpu_buddy_block *block); -int drm_buddy_alloc_blocks(struct drm_buddy *mm, +int gpu_buddy_alloc_blocks(struct gpu_buddy *mm, u64 start, u64 end, u64 size, u64 min_page_size, struct list_head *blocks, unsigned long flags); -int drm_buddy_block_trim(struct drm_buddy *mm, +int gpu_buddy_block_trim(struct gpu_buddy *mm, u64 *start, u64 new_size, struct list_head *blocks); -void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear); +void gpu_buddy_reset_clear(struct gpu_buddy *mm, bool is_clear); -void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block); +void gpu_buddy_free_block(struct gpu_buddy *mm, struct gpu_buddy_block *block); -void drm_buddy_free_list(struct drm_buddy *mm, +void gpu_buddy_free_list(struct gpu_buddy *mm, struct list_head *objects, unsigned int flags); -void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p); -void drm_buddy_block_print(struct drm_buddy *mm, - struct drm_buddy_block *block, - struct drm_printer *p); +void gpu_buddy_print(struct gpu_buddy *mm); +void gpu_buddy_block_print(struct gpu_buddy *mm, + struct gpu_buddy_block *block); #endif From ee8bfb15d02dddb554ad4bdd6c44297f19556563 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Fri, 6 Feb 2026 11:44:09 +1000 Subject: [PATCH 043/158] drm: drop lib from header search path. 
This was leftover from when I dropped it in 4a9671a03f2b ("gpu: Move DRM buddy allocator one level up (part one)") Signed-off-by: Dave Airlie --- drivers/gpu/drm/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index d0e37f8c2a46..062162d8e179 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -242,7 +242,7 @@ obj-$(CONFIG_DRM_VERISILICON_DC) += verisilicon/ # Ensure drm headers are self-contained and pass kernel-doc hdrtest-files := \ $(shell cd $(src) && find . -maxdepth 1 -name 'drm_*.h') \ - $(shell cd $(src) && find display lib -name '*.h') + $(shell cd $(src) && find display -name '*.h') always-$(CONFIG_DRM_HEADER_TEST) += \ $(patsubst %.h,%.hdrtest, $(hdrtest-files)) From 6d438685340df6ac8570326aaa51c3603a2fe25c Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Thu, 5 Feb 2026 15:10:44 +0100 Subject: [PATCH 044/158] drm/fbdev-emulation: Remove empty placeholders Only DRM clients for fbdev emulation invoke fbdev helpers. Hence remove the empty placeholders for non-fbdev builds, as they are unused. 
Signed-off-by: Thomas Zimmermann Reviewed-by: Maarten Lankhorst Link: https://patch.msgid.link/20260205141142.412048-1-tzimmermann@suse.de --- include/drm/drm_fb_helper.h | 105 ------------------------------------ 1 file changed, 105 deletions(-) diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 05cca77b7249..15274b8a1d97 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -271,111 +271,6 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper); -#else -static inline void drm_fb_helper_prepare(struct drm_device *dev, - struct drm_fb_helper *helper, - unsigned int preferred_bpp, - const struct drm_fb_helper_funcs *funcs) -{ -} - -static inline void drm_fb_helper_unprepare(struct drm_fb_helper *fb_helper) -{ -} - -static inline int drm_fb_helper_init(struct drm_device *dev, - struct drm_fb_helper *helper) -{ - /* So drivers can use it to free the struct */ - helper->dev = dev; - dev->fb_helper = helper; - - return 0; -} - -static inline void drm_fb_helper_fini(struct drm_fb_helper *helper) -{ - if (helper && helper->dev) - helper->dev->fb_helper = NULL; -} - -static inline int drm_fb_helper_blank(int blank, struct fb_info *info) -{ - return 0; -} - -static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, - struct fb_info *info) -{ - return 0; -} - -static inline int drm_fb_helper_set_par(struct fb_info *info) -{ - return 0; -} - -static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var, - struct fb_info *info) -{ - return 0; -} - -static inline int -drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) -{ - return 0; -} - -static inline void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper) -{ -} - -static inline void -drm_fb_helper_fill_info(struct fb_info *info, - struct drm_fb_helper *fb_helper, - struct 
drm_fb_helper_surface_size *sizes) -{ -} - -static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap, - struct fb_info *info) -{ - return 0; -} - -static inline int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, - unsigned long arg) -{ - return 0; -} - -#ifdef CONFIG_FB_DEFERRED_IO -static inline void drm_fb_helper_deferred_io(struct fb_info *info, - struct list_head *pagelist) -{ -} -#endif - -static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, - bool suspend) -{ -} - -static inline void -drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, bool suspend) -{ -} - -static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) -{ - return 0; -} - -static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper) -{ - return 0; -} #endif #endif From 55473b60178060a4fdb4631bd0c91879cc7d18d8 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Thu, 5 Feb 2026 15:40:48 +0100 Subject: [PATCH 045/158] drm/fbdev-emulation: Remove support for legacy emulation Remove the internal DRM client from fbdev emulation. This has been required when some DRM drivers provided their own fbdev emulation. This is no longer the case with commit b55f3bbab891 ("drm/{i915, xe}: Implement fbdev emulation as in-kernel client") from 2024. Now there's only a single DRM client for fbdev-emulation that fills out the client callback functions as required. 
Signed-off-by: Thomas Zimmermann Reviewed-by: Maarten Lankhorst Link: https://patch.msgid.link/20260205144056.416759-1-tzimmermann@suse.de --- drivers/gpu/drm/drm_fb_helper.c | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 05803169bed5..845c63ca15b5 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -343,18 +343,6 @@ EXPORT_SYMBOL(drm_fb_helper_unprepare); int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *fb_helper) { - int ret; - - /* - * If this is not the generic fbdev client, initialize a drm_client - * without callbacks so we can use the modesets. - */ - if (!fb_helper->client.funcs) { - ret = drm_client_init(dev, &fb_helper->client, "drm_fb_helper", NULL); - if (ret) - return ret; - } - dev->fb_helper = fb_helper; return 0; @@ -437,9 +425,6 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) cancel_work_sync(&fb_helper->damage_work); drm_fb_helper_release_info(fb_helper); - - if (!fb_helper->client.funcs) - drm_client_release(&fb_helper->client); } EXPORT_SYMBOL(drm_fb_helper_fini); From e19cc5ab347e3cdcc21c97ea5d11af8da7f1358d Mon Sep 17 00:00:00 2001 From: Matt Coster Date: Fri, 23 Jan 2026 14:44:50 +0000 Subject: [PATCH 046/158] drm/imagination: Use dev_pm_domain_attach_list() This helper handles the attaching and linking of the entire list of power domains. Besides making pvr_power_domains_init() simpler, this also lays the groundwork to simplify supporting the varied power domain names used in Volcanic GPU cores. Note that we still need to create the links between power domains to ensure they're brought up in a valid sequence. 
Reviewed-by: Alessio Belle Link: https://patch.msgid.link/20260123-pm-domain-attach-list-v1-1-d51dd7e43253@imgtec.com Signed-off-by: Matt Coster --- drivers/gpu/drm/imagination/pvr_device.h | 10 ++- drivers/gpu/drm/imagination/pvr_power.c | 80 +++++++++--------------- 2 files changed, 33 insertions(+), 57 deletions(-) diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h index cfda215e7428..d51c57cf9332 100644 --- a/drivers/gpu/drm/imagination/pvr_device.h +++ b/drivers/gpu/drm/imagination/pvr_device.h @@ -152,15 +152,13 @@ struct pvr_device { * @power: Optional power domain devices. * * On platforms with more than one power domain for the GPU, they are - * stored here in @domain_devs, along with links between them in - * @domain_links. The size of @domain_devs is given by @domain_count, - * while the size of @domain_links is (2 * @domain_count) - 1. + * stored here in @domains, along with links between them in + * @domain_links. The size of @domain_links is one less than + * struct dev_pm_domain_list->num_pds in @domains. 
*/ struct pvr_device_power { - struct device **domain_devs; + struct dev_pm_domain_list *domains; struct device_link **domain_links; - - u32 domain_count; } power; /** diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c index b9f801c63260..a0834c550a85 100644 --- a/drivers/gpu/drm/imagination/pvr_power.c +++ b/drivers/gpu/drm/imagination/pvr_power.c @@ -593,14 +593,16 @@ pvr_watchdog_fini(struct pvr_device *pvr_dev) int pvr_power_domains_init(struct pvr_device *pvr_dev) { - struct device *dev = from_pvr_device(pvr_dev)->dev; + static const char *const ROGUE_PD_NAMES[] = { "a", "b", "c", "d", "e" }; + + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + struct device *dev = drm_dev->dev; struct device_link **domain_links __free(kfree) = NULL; - struct device **domain_devs __free(kfree) = NULL; + struct dev_pm_domain_list *domains = NULL; int domain_count; int link_count; - char dev_name[2] = "a"; int err; int i; @@ -612,46 +614,33 @@ int pvr_power_domains_init(struct pvr_device *pvr_dev) if (domain_count <= 1) return 0; - link_count = domain_count + (domain_count - 1); + if (domain_count > ARRAY_SIZE(ROGUE_PD_NAMES)) { + drm_err(drm_dev, "%s() only supports %zu domains on Rogue", + __func__, ARRAY_SIZE(ROGUE_PD_NAMES)); + return -EOPNOTSUPP; + } - domain_devs = kcalloc(domain_count, sizeof(*domain_devs), GFP_KERNEL); - if (!domain_devs) - return -ENOMEM; + link_count = domain_count - 1; domain_links = kcalloc(link_count, sizeof(*domain_links), GFP_KERNEL); if (!domain_links) return -ENOMEM; - for (i = 0; i < domain_count; i++) { - struct device *domain_dev; + const struct dev_pm_domain_attach_data pd_attach_data = { + .pd_names = ROGUE_PD_NAMES, + .num_pd_names = domain_count, + .pd_flags = 0, + }; - dev_name[0] = 'a' + i; - domain_dev = dev_pm_domain_attach_by_name(dev, dev_name); - if (IS_ERR_OR_NULL(domain_dev)) { - err = domain_dev ? 
PTR_ERR(domain_dev) : -ENODEV; - goto err_detach; - } + err = dev_pm_domain_attach_list(dev, &pd_attach_data, &domains); + if (err < 0) + return err; - domain_devs[i] = domain_dev; - } - - for (i = 0; i < domain_count; i++) { + for (i = 0; i < link_count; i++) { struct device_link *link; - link = device_link_add(dev, domain_devs[i], DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME); - if (!link) { - err = -ENODEV; - goto err_unlink; - } - - domain_links[i] = link; - } - - for (i = domain_count; i < link_count; i++) { - struct device_link *link; - - link = device_link_add(domain_devs[i - domain_count + 1], - domain_devs[i - domain_count], + link = device_link_add(domains->pd_devs[i + 1], + domains->pd_devs[i], DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME); if (!link) { err = -ENODEV; @@ -662,9 +651,8 @@ int pvr_power_domains_init(struct pvr_device *pvr_dev) } pvr_dev->power = (struct pvr_device_power){ - .domain_devs = no_free_ptr(domain_devs), + .domains = domains, .domain_links = no_free_ptr(domain_links), - .domain_count = domain_count, }; return 0; @@ -673,31 +661,21 @@ int pvr_power_domains_init(struct pvr_device *pvr_dev) while (--i >= 0) device_link_del(domain_links[i]); - i = domain_count; - -err_detach: - while (--i >= 0) - dev_pm_domain_detach(domain_devs[i], true); - return err; } void pvr_power_domains_fini(struct pvr_device *pvr_dev) { - const int domain_count = pvr_dev->power.domain_count; + struct pvr_device_power *pvr_power = &pvr_dev->power; - int i = domain_count + (domain_count - 1); + int i = (int)pvr_power->domains->num_pds - 1; while (--i >= 0) - device_link_del(pvr_dev->power.domain_links[i]); + device_link_del(pvr_power->domain_links[i]); - i = domain_count; + dev_pm_domain_detach_list(pvr_power->domains); - while (--i >= 0) - dev_pm_domain_detach(pvr_dev->power.domain_devs[i], true); + kfree(pvr_power->domain_links); - kfree(pvr_dev->power.domain_links); - kfree(pvr_dev->power.domain_devs); - - pvr_dev->power = (struct pvr_device_power){ 0 }; + *pvr_power = 
(struct pvr_device_power){ 0 }; } From 1d7532444a32b53ff7344dc52019bab5a4b5ed66 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Wed, 21 Jan 2026 12:15:45 +0200 Subject: [PATCH 047/158] dt-bindings: drm/bridge: anx7625: describe Type-C connector ANX7625 can be used to mux converted video stream with the USB signals on a Type-C connector. Describe the optional connector subnode, make it exclusive with the AUX bus and port@1 as it is impossible to have both eDP panel and USB-C connector. Reviewed-by: Rob Herring (Arm) Link: https://patch.msgid.link/20260121-anx7625-typec-v2-1-d14f31256a17@oss.qualcomm.com Signed-off-by: Dmitry Baryshkov --- .../display/bridge/analogix,anx7625.yaml | 98 ++++++++++++++++++- 1 file changed, 97 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml b/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml index a1ed1004651b..6ad466952c02 100644 --- a/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml +++ b/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml @@ -85,6 +85,11 @@ properties: aux-bus: $ref: /schemas/display/dp-aux-bus.yaml# + connector: + type: object + $ref: /schemas/connector/usb-connector.yaml# + unevaluatedProperties: false + ports: $ref: /schemas/graph.yaml#/properties/ports @@ -117,7 +122,6 @@ properties: required: - port@0 - - port@1 required: - compatible @@ -127,6 +131,28 @@ required: - vdd33-supply - ports +allOf: + - if: + required: + - aux-bus + - connector + then: + false + + - if: + required: + - connector + then: + properties: + ports: + properties: + port@1: false + else: + properties: + ports: + required: + - port@1 + additionalProperties: false examples: @@ -185,3 +211,73 @@ examples: }; }; }; + - | + #include + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + encoder@58 { + compatible = "analogix,anx7625"; + reg = <0x58>; + enable-gpios = <&pio 45 GPIO_ACTIVE_HIGH>; + reset-gpios = 
<&pio 73 GPIO_ACTIVE_HIGH>; + vdd10-supply = <&pp1000_mipibrdg>; + vdd18-supply = <&pp1800_mipibrdg>; + vdd33-supply = <&pp3300_mipibrdg>; + analogix,audio-enable; + analogix,lane0-swing = /bits/ 8 <0x14 0x54 0x64 0x74>; + analogix,lane1-swing = /bits/ 8 <0x14 0x54 0x64 0x74>; + + connector { + compatible = "usb-c-connector"; + power-role = "dual"; + data-role = "dual"; + vbus-supply = <&vbus_reg>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + endpoint { + remote-endpoint = <&usb_hs>; + }; + }; + + port@1 { + reg = <1>; + + endpoint { + remote-endpoint = <&usb_ss>; + }; + }; + + port@2 { + reg = <2>; + + endpoint { + remote-endpoint = <&usb_sbu>; + }; + }; + }; + }; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + endpoint { + remote-endpoint = <&mipi_dsi>; + bus-type = <7>; + data-lanes = <0 1 2 3>; + }; + }; + }; + }; + }; From f81455b2d3327a5685623e7db4050dbbe5513bc3 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Wed, 21 Jan 2026 12:15:46 +0200 Subject: [PATCH 048/158] drm: bridge: anx7625: implement minimal Type-C support ANX7625 can be used as a USB-C controller, handling USB and DP data streams. Provide minimal Type-C support necessary for ANX7625 to register the Type-C port device and properly respond to data / power role events from the Type-C partner. While ANX7625 provides TCPCI interface, using it would circumvent the on-chip running firmware. Analogix recommended using the higher-level interface instead of TCPCI. 
Reviewed-by: Xin Ji Reviewed-by: Heikki Krogerus Link: https://patch.msgid.link/20260121-anx7625-typec-v2-2-d14f31256a17@oss.qualcomm.com Signed-off-by: Dmitry Baryshkov --- drivers/gpu/drm/bridge/analogix/Kconfig | 1 + drivers/gpu/drm/bridge/analogix/anx7625.c | 155 ++++++++++++++++++++-- drivers/gpu/drm/bridge/analogix/anx7625.h | 22 ++- 3 files changed, 168 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig index 4846b2e9be7c..f3448b0631fe 100644 --- a/drivers/gpu/drm/bridge/analogix/Kconfig +++ b/drivers/gpu/drm/bridge/analogix/Kconfig @@ -34,6 +34,7 @@ config DRM_ANALOGIX_ANX7625 tristate "Analogix Anx7625 MIPI to DP interface support" depends on DRM depends on OF + depends on TYPEC || !TYPEC select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HDCP_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 4e49e4f28d55..8dc6e3b16968 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -3,6 +3,7 @@ * Copyright(c) 2020, Analogix Semiconductor. All rights reserved. 
* */ +#include #include #include #include @@ -15,6 +16,9 @@ #include #include #include +#include +#include +#include #include #include @@ -1325,7 +1329,7 @@ static int anx7625_read_hpd_gpio_config_status(struct anx7625_data *ctx) static void anx7625_disable_pd_protocol(struct anx7625_data *ctx) { struct device *dev = ctx->dev; - int ret, val; + int ret; /* Reset main ocm */ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x40); @@ -1339,6 +1343,11 @@ static void anx7625_disable_pd_protocol(struct anx7625_data *ctx) DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature fail.\n"); else DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature succeeded.\n"); +} + +static void anx7625_configure_hpd(struct anx7625_data *ctx) +{ + int val; /* * Make sure the HPD GPIO already be configured after OCM release before @@ -1369,7 +1378,9 @@ static int anx7625_ocm_loading_check(struct anx7625_data *ctx) if ((ret & FLASH_LOAD_STA_CHK) != FLASH_LOAD_STA_CHK) return -ENODEV; - anx7625_disable_pd_protocol(ctx); + if (!ctx->typec_port) + anx7625_disable_pd_protocol(ctx); + anx7625_configure_hpd(ctx); DRM_DEV_DEBUG_DRIVER(dev, "Firmware ver %02x%02x,", anx7625_reg_read(ctx, @@ -1472,6 +1483,107 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx) DRM_DEV_DEBUG_DRIVER(dev, "Secure OCM version=%02x\n", ret); } +#if IS_REACHABLE(CONFIG_TYPEC) +static void anx7625_typec_set_orientation(struct anx7625_data *ctx) +{ + u32 val = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS); + + if (val & (CC1_RP | CC1_RD)) + typec_set_orientation(ctx->typec_port, TYPEC_ORIENTATION_NORMAL); + else if (val & (CC2_RP | CC2_RD)) + typec_set_orientation(ctx->typec_port, TYPEC_ORIENTATION_REVERSE); + else + typec_set_orientation(ctx->typec_port, TYPEC_ORIENTATION_NONE); +} + +static void anx7625_typec_set_status(struct anx7625_data *ctx, + unsigned int intr_status, + unsigned int intr_vector) +{ + if (intr_vector & CC_STATUS) + anx7625_typec_set_orientation(ctx); + if (intr_vector & 
DATA_ROLE_STATUS) { + enum typec_data_role data_role = (intr_status & DATA_ROLE_STATUS) ? + TYPEC_HOST : TYPEC_DEVICE; + usb_role_switch_set_role(ctx->role_sw, + (intr_status & DATA_ROLE_STATUS) ? + USB_ROLE_HOST : USB_ROLE_DEVICE); + typec_set_data_role(ctx->typec_port, data_role); + ctx->typec_data_role = data_role; + } + if (intr_vector & VBUS_STATUS) + typec_set_pwr_role(ctx->typec_port, + (intr_status & VBUS_STATUS) ? + TYPEC_SOURCE : TYPEC_SINK); + if (intr_vector & VCONN_STATUS) + typec_set_vconn_role(ctx->typec_port, + (intr_status & VCONN_STATUS) ? + TYPEC_SOURCE : TYPEC_SINK); +} + +static int anx7625_typec_register(struct anx7625_data *ctx) +{ + struct typec_capability typec_cap = { }; + struct fwnode_handle *fwnode __free(fwnode_handle) = + device_get_named_child_node(ctx->dev, "connector"); + u32 val; + int ret; + + if (!fwnode) + return 0; + + ret = typec_get_fw_cap(&typec_cap, fwnode); + if (ret < 0) + return ret; + + typec_cap.revision = 0x0120; + typec_cap.pd_revision = 0x0300; + typec_cap.usb_capability = USB_CAPABILITY_USB2 | USB_CAPABILITY_USB3; + typec_cap.orientation_aware = true; + + typec_cap.driver_data = ctx; + + ctx->typec_port = typec_register_port(ctx->dev, &typec_cap); + if (IS_ERR(ctx->typec_port)) + return PTR_ERR(ctx->typec_port); + + ctx->role_sw = fwnode_usb_role_switch_get(fwnode); + if (IS_ERR(ctx->role_sw)) { + typec_unregister_port(ctx->typec_port); + return PTR_ERR(ctx->role_sw); + } + + val = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS); + + anx7625_typec_set_status(ctx, val, + CC_STATUS | DATA_ROLE_STATUS | + VBUS_STATUS | VCONN_STATUS); + + return 0; +} + +static void anx7625_typec_unregister(struct anx7625_data *ctx) +{ + usb_role_switch_put(ctx->role_sw); + typec_unregister_port(ctx->typec_port); +} +#else +static void anx7625_typec_set_status(struct anx7625_data *ctx, + unsigned int intr_status, + unsigned int intr_vector) +{ +} + +static int anx7625_typec_register(struct anx7625_data *ctx) +{ + return 0; 
+} + +static void anx7625_typec_unregister(struct anx7625_data *ctx) +{ +} +#endif + static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx) { return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS); @@ -1566,7 +1678,7 @@ static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on) } } -static int anx7625_hpd_change_detect(struct anx7625_data *ctx) +static int anx7625_intr_status(struct anx7625_data *ctx) { int intr_vector, status; struct device *dev = ctx->dev; @@ -1593,9 +1705,6 @@ static int anx7625_hpd_change_detect(struct anx7625_data *ctx) return status; } - if (!(intr_vector & HPD_STATUS_CHANGE)) - return -ENOENT; - status = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS); if (status < 0) { @@ -1604,6 +1713,12 @@ static int anx7625_hpd_change_detect(struct anx7625_data *ctx) } DRM_DEV_DEBUG_DRIVER(dev, "0x7e:0x45=%x\n", status); + + anx7625_typec_set_status(ctx, status, intr_vector); + + if (!(intr_vector & HPD_STATUS)) + return -ENOENT; + dp_hpd_change_handler(ctx, status & HPD_STATUS); return 0; @@ -1622,7 +1737,7 @@ static void anx7625_work_func(struct work_struct *work) return; } - event = anx7625_hpd_change_detect(ctx); + event = anx7625_intr_status(ctx); mutex_unlock(&ctx->lock); @@ -2741,11 +2856,29 @@ static int anx7625_i2c_probe(struct i2c_client *client) } if (!platform->pdata.low_power_mode) { - anx7625_disable_pd_protocol(platform); + struct fwnode_handle *fwnode; + + fwnode = device_get_named_child_node(dev, "connector"); + if (fwnode) + fwnode_handle_put(fwnode); + else + anx7625_disable_pd_protocol(platform); + + anx7625_configure_hpd(platform); + pm_runtime_get_sync(dev); _anx7625_hpd_polling(platform, 5000 * 100); } + if (platform->pdata.intp_irq) + anx7625_reg_write(platform, platform->i2c.rx_p0_client, + INTERFACE_CHANGE_INT_MASK, 0); + + /* After getting runtime handle */ + ret = anx7625_typec_register(platform); + if (ret) + goto pm_suspend; + /* Add work function */ if (platform->pdata.intp_irq) { 
enable_irq(platform->pdata.intp_irq); @@ -2759,6 +2892,10 @@ static int anx7625_i2c_probe(struct i2c_client *client) return 0; +pm_suspend: + if (!platform->pdata.low_power_mode) + pm_runtime_put_sync_suspend(&client->dev); + free_wq: if (platform->workqueue) destroy_workqueue(platform->workqueue); @@ -2774,6 +2911,8 @@ static void anx7625_i2c_remove(struct i2c_client *client) { struct anx7625_data *platform = i2c_get_clientdata(client); + anx7625_typec_unregister(platform); + drm_bridge_remove(&platform->bridge); if (platform->pdata.intp_irq) diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h index eb5580f1ab2f..a18561c213af 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.h +++ b/drivers/gpu/drm/bridge/analogix/anx7625.h @@ -51,9 +51,21 @@ #define INTR_RECEIVED_MSG BIT(5) #define SYSTEM_STSTUS 0x45 +#define INTERFACE_CHANGE_INT_MASK 0x43 #define INTERFACE_CHANGE_INT 0x44 -#define HPD_STATUS_CHANGE 0x80 -#define HPD_STATUS 0x80 +#define VCONN_STATUS BIT(2) +#define VBUS_STATUS BIT(3) +#define CC_STATUS BIT(4) +#define DATA_ROLE_STATUS BIT(5) +#define HPD_STATUS BIT(7) + +#define NEW_CC_STATUS 0x46 +#define CC1_RD BIT(0) +#define CC1_RA BIT(1) +#define CC1_RP (BIT(2) | BIT(3)) +#define CC2_RD BIT(4) +#define CC2_RA BIT(5) +#define CC2_RP (BIT(6) | BIT(7)) /******** END of I2C Address 0x58 ********/ @@ -447,9 +459,15 @@ struct anx7625_i2c_client { struct i2c_client *tcpc_client; }; +struct typec_port; +struct usb_role_switch; + struct anx7625_data { struct anx7625_platform_data pdata; struct platform_device *audio_pdev; + struct typec_port *typec_port; + struct usb_role_switch *role_sw; + int typec_data_role; int hpd_status; int hpd_high_cnt; int dp_en; From 8ad0f7d2e6fdfc4462a5b168ec64d73b7e952ab9 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Wed, 21 Jan 2026 12:15:47 +0200 Subject: [PATCH 049/158] drm: bridge: anx7625: implement message sending Swapping the data role requires sending the message to the 
other USB-C side. Implement sending these messages through the OCM. The code is largely based on the anx7411.c USB-C driver. Reviewed-by: Xin Ji Link: https://patch.msgid.link/20260121-anx7625-typec-v2-3-d14f31256a17@oss.qualcomm.com Signed-off-by: Dmitry Baryshkov --- drivers/gpu/drm/bridge/analogix/anx7625.c | 68 +++++++++++++++++++++++ drivers/gpu/drm/bridge/analogix/anx7625.h | 12 ++++ 2 files changed, 80 insertions(+) diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 8dc6e3b16968..c43519097a45 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -1484,6 +1484,73 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx) } #if IS_REACHABLE(CONFIG_TYPEC) +static u8 anx7625_checksum(u8 *buf, u8 len) +{ + u8 ret = 0; + u8 i; + + for (i = 0; i < len; i++) + ret += buf[i]; + + return ret; +} + +static int anx7625_read_msg_ctrl_status(struct anx7625_data *ctx) +{ + return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, CMD_SEND_BUF); +} + +static int anx7625_wait_msg_empty(struct anx7625_data *ctx) +{ + int val; + + return readx_poll_timeout(anx7625_read_msg_ctrl_status, ctx, + val, (val < 0) || (val == 0), + 2000, 2000 * 150); +} + +static int anx7625_send_msg(struct anx7625_data *ctx, u8 type, u8 *buf, u8 size) +{ + struct fw_msg *msg = &ctx->send_msg; + u8 crc; + int ret; + + size = min_t(u8, size, (u8)MAX_BUF_LEN); + memcpy(msg->buf, buf, size); + msg->msg_type = type; + + /* msg len equals buffer length + msg_type */ + msg->msg_len = size + 1; + + crc = anx7625_checksum((u8 *)msg, size + HEADER_LEN); + msg->buf[size] = 0 - crc; + + ret = anx7625_wait_msg_empty(ctx); + if (ret) + return ret; + + ret = anx7625_reg_block_write(ctx, ctx->i2c.rx_p0_client, + CMD_SEND_BUF + 1, size + HEADER_LEN, + &msg->msg_type); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, CMD_SEND_BUF, + msg->msg_len); + return ret; +} + +static int anx7625_typec_dr_set(struct 
typec_port *port, enum typec_data_role role) +{ + struct anx7625_data *ctx = typec_get_drvdata(port); + + if (role == ctx->typec_data_role) + return 0; + + return anx7625_send_msg(ctx, 0x11, NULL, 0); +} + +static const struct typec_operations anx7625_typec_ops = { + .dr_set = anx7625_typec_dr_set, +}; + static void anx7625_typec_set_orientation(struct anx7625_data *ctx) { u32 val = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS); @@ -1542,6 +1609,7 @@ static int anx7625_typec_register(struct anx7625_data *ctx) typec_cap.orientation_aware = true; typec_cap.driver_data = ctx; + typec_cap.ops = &anx7625_typec_ops; ctx->typec_port = typec_register_port(ctx->dev, &typec_cap); if (IS_ERR(ctx->typec_port)) diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h index a18561c213af..957d234ec07c 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.h +++ b/drivers/gpu/drm/bridge/analogix/anx7625.h @@ -67,6 +67,9 @@ #define CC2_RA BIT(5) #define CC2_RP (BIT(6) | BIT(7)) +#define CMD_SEND_BUF 0xC0 +#define CMD_RECV_BUF 0xE0 + /******** END of I2C Address 0x58 ********/ /***************************************************************/ @@ -462,6 +465,14 @@ struct anx7625_i2c_client { struct typec_port; struct usb_role_switch; +#define MAX_BUF_LEN 30 +struct fw_msg { + u8 msg_len; + u8 msg_type; + u8 buf[MAX_BUF_LEN]; +} __packed; +#define HEADER_LEN 2 + struct anx7625_data { struct anx7625_platform_data pdata; struct platform_device *audio_pdev; @@ -497,6 +508,7 @@ struct anx7625_data { struct drm_connector *connector; struct mipi_dsi_device *dsi; struct drm_dp_aux aux; + struct fw_msg send_msg; }; #endif /* __ANX7625_H__ */ From dbce431756f85ec76a189312afdae2dee14fb0de Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 6 Feb 2026 18:51:41 +0530 Subject: [PATCH 050/158] drm/gem: Make drm_gem_objects_lookup() self-cleaning on failure v6 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit drm_gem_objects_lookup() can allocate the output array and take references on GEM objects before it fails. If an error happens part-way through, callers previously had to clean up partially created results themselves. This relied on subtle and undocumented behavior and was easy to get wrong. Make drm_gem_objects_lookup() clean up on failure. The function now drops any references it already took, frees the array, and sets *objs_out to NULL before returning an error. On success, behavior is unchanged. Existing callers remain correct and their error cleanup paths simply do nothing when *objs_out is NULL. v2/v3: Move partial-lookup cleanup into objects_lookup(), perform reference dropping outside the lock, and remove reliance on __GFP_ZERO or implicit NULL handling. (Christian) v4: Use goto-style error handling in objects_lookup(), drop partial references outside the lock, and simplify drm_gem_objects_lookup() cleanup by routing failures through err_free_handles as suggested. (Christian) v5: Rebase on drm-misc-next, drop the ret local variable. (Christian) v6: Drop superfluous initialization of handles. 
(Christian/Tvrtko) Cc: Alex Deucher Suggested-by: Christian König Suggested-by: Tvrtko Ursulin Signed-off-by: Srinivasan Shanmugam Reviewed-by: Christian König Reviewed-by: Tvrtko Ursulin Signed-off-by: Arunpravin Paneer Selvam Link: https://patch.msgid.link/20260206132141.1474191-1-srinivasan.shanmugam@amd.com --- drivers/gpu/drm/drm_gem.c | 45 +++++++++++++++++++++++++++------------ 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 7ff6b7bbeb73..c4a3de3b920e 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -783,7 +783,7 @@ EXPORT_SYMBOL(drm_gem_put_pages); static int objects_lookup(struct drm_file *filp, u32 *handle, int count, struct drm_gem_object **objs) { - int i, ret = 0; + int i; struct drm_gem_object *obj; spin_lock(&filp->table_lock); @@ -791,16 +791,23 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count, for (i = 0; i < count; i++) { /* Check if we currently have a reference on the object */ obj = idr_find(&filp->object_idr, handle[i]); - if (!obj) { - ret = -ENOENT; - break; - } + if (!obj) + goto err; + drm_gem_object_get(obj); objs[i] = obj; } + + spin_unlock(&filp->table_lock); + return 0; + +err: spin_unlock(&filp->table_lock); - return ret; + while (i--) + drm_gem_object_put(objs[i]); + + return -ENOENT; } /** @@ -828,24 +835,34 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, u32 *handles; int ret; + *objs_out = NULL; + if (!count) return 0; - objs = kvmalloc_array(count, sizeof(struct drm_gem_object *), - GFP_KERNEL | __GFP_ZERO); + objs = kvmalloc_array(count, sizeof(*objs), GFP_KERNEL); if (!objs) return -ENOMEM; - *objs_out = objs; - handles = vmemdup_array_user(bo_handles, count, sizeof(u32)); - if (IS_ERR(handles)) - return PTR_ERR(handles); + if (IS_ERR(handles)) { + ret = PTR_ERR(handles); + goto err_free_objs; + } ret = objects_lookup(filp, handles, count, objs); - kvfree(handles); - return 
ret; + if (ret) + goto err_free_handles; + kvfree(handles); + *objs_out = objs; + return 0; + +err_free_handles: + kvfree(handles); +err_free_objs: + kvfree(objs); + return ret; } EXPORT_SYMBOL(drm_gem_objects_lookup); From 9d757669b2b22cd224c334924f798393ffca537c Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 20 Jan 2026 16:20:49 +0100 Subject: [PATCH 051/158] drm/nouveau/gsp: simplify code with acpi_get_local_u64_address() Now we have a helper so there's no need to open-code. Signed-off-by: Andy Shevchenko Link: https://patch.msgid.link/20260120152049.1763055-1-andriy.shevchenko@linux.intel.com Signed-off-by: Danilo Krummrich --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index 2a7e80c6d70f..c675324693af 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -796,7 +796,8 @@ r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode, struct acpi_object_list input = { 1, &mux_arg }; acpi_handle iter = NULL, handle_mux = NULL; acpi_status status; - unsigned long long value; + u64 value; + int ret; mode->status = 0xffff; part->status = 0xffff; @@ -806,8 +807,8 @@ r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode, if (ACPI_FAILURE(status) || !iter) return; - status = acpi_evaluate_integer(iter, "_ADR", NULL, &value); - if (ACPI_FAILURE(status) || value != id) + ret = acpi_get_local_u64_address(iter, &value); + if (ret || value != id) continue; handle_mux = iter; From b351df4bbd127f3fa80b06f1a0cd7ccfcded4f7a Mon Sep 17 00:00:00 2001 From: Marco Crivellari Date: Thu, 30 Oct 2025 17:20:41 +0100 Subject: [PATCH 052/158] drm/atomic-helper: replace use of system_unbound_wq with system_dfl_wq Currently if a user enqueue a work item using 
schedule_delayed_work() the used wq is "system_wq" (per-cpu wq) while queue_delayed_work() use WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to schedule_work() that is using system_wq and queue_work(), that makes use again of WORK_CPU_UNBOUND. This lack of consistency cannot be addressed without refactoring the API. system_unbound_wq should be the default workqueue so as not to enforce locality constraints for random work whenever it's not required. Adding system_dfl_wq to encourage its use when unbound work should be used. The old system_unbound_wq will be kept for a few release cycles. Suggested-by: Tejun Heo Signed-off-by: Marco Crivellari Reviewed-by: Thomas Zimmermann Signed-off-by: Thomas Zimmermann Link: https://patch.msgid.link/20251030162043.292468-2-marco.crivellari@suse.com --- drivers/gpu/drm/drm_atomic_helper.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index cc1f0c102414..d422f79b96db 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -2301,13 +2301,13 @@ int drm_atomic_helper_commit(struct drm_device *dev, * current layout. * * NOTE: Commit work has multiple phases, first hardware commit, then - * cleanup. We want them to overlap, hence need system_unbound_wq to + * cleanup. We want them to overlap, hence need system_dfl_wq to * make sure work items don't artificially stall on each another. */ drm_atomic_state_get(state); if (nonblock) - queue_work(system_unbound_wq, &state->commit_work); + queue_work(system_dfl_wq, &state->commit_work); else commit_tail(state); @@ -2340,7 +2340,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); * * Asynchronous workers need to have sufficient parallelism to be able to run * different atomic commits on different CRTCs in parallel. The simplest way to - * achieve this is by running them on the &system_unbound_wq work queue. 
Note + * achieve this is by running them on the &system_dfl_wq work queue. Note * that drivers are not required to split up atomic commits and run an * individual commit in parallel - userspace is supposed to do that if it cares. * But it might be beneficial to do that for modesets, since those necessarily From ecae1bb8b0870cfa338c11083ef7ef02180e40be Mon Sep 17 00:00:00 2001 From: Marco Crivellari Date: Thu, 30 Oct 2025 17:20:42 +0100 Subject: [PATCH 053/158] drm/probe-helper: replace use of system_wq with system_percpu_wq Currently if a user enqueue a work item using schedule_delayed_work() the used wq is "system_wq" (per-cpu wq) while queue_delayed_work() use WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to schedule_work() that is using system_wq and queue_work(), that makes use again of WORK_CPU_UNBOUND. This lack of consistency cannot be addressed without refactoring the API. system_wq should be the per-cpu workqueue, yet in this name nothing makes that clear, so replace system_wq with system_percpu_wq. The old wq (system_wq) will be kept for a few release cycles. 
Suggested-by: Tejun Heo Signed-off-by: Marco Crivellari Reviewed-by: Thomas Zimmermann Signed-off-by: Thomas Zimmermann Link: https://patch.msgid.link/20251030162043.292468-3-marco.crivellari@suse.com --- drivers/gpu/drm/drm_probe_helper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 09b12c30df69..d4dc8cb45bce 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -626,7 +626,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, */ dev->mode_config.delayed_event = true; if (dev->mode_config.poll_enabled) - mod_delayed_work(system_wq, + mod_delayed_work(system_percpu_wq, &dev->mode_config.output_poll_work, 0); } From 391ce961bcaf88fcd5a7d710f43c1806aa03ecfe Mon Sep 17 00:00:00 2001 From: Marco Crivellari Date: Thu, 30 Oct 2025 17:20:43 +0100 Subject: [PATCH 054/158] drm/self_refresh: replace use of system_wq with system_percpu_wq Currently if a user enqueue a work item using schedule_delayed_work() the used wq is "system_wq" (per-cpu wq) while queue_delayed_work() use WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to schedule_work() that is using system_wq and queue_work(), that makes use again of WORK_CPU_UNBOUND. This lack of consistency cannot be addressed without refactoring the API. system_wq should be the per-cpu workqueue, yet in this name nothing makes that clear, so replace system_wq with system_percpu_wq. The old wq (system_wq) will be kept for a few release cycles. 
Suggested-by: Tejun Heo Signed-off-by: Marco Crivellari Reviewed-by: Thomas Zimmermann Signed-off-by: Thomas Zimmermann Link: https://patch.msgid.link/20251030162043.292468-4-marco.crivellari@suse.com --- drivers/gpu/drm/drm_self_refresh_helper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_self_refresh_helper.c b/drivers/gpu/drm/drm_self_refresh_helper.c index c0948586b7fd..36f4237efd4d 100644 --- a/drivers/gpu/drm/drm_self_refresh_helper.c +++ b/drivers/gpu/drm/drm_self_refresh_helper.c @@ -218,7 +218,7 @@ void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state) ewma_psr_time_read(&sr_data->exit_avg_ms)) * 2; mutex_unlock(&sr_data->avg_mutex); - mod_delayed_work(system_wq, &sr_data->entry_work, + mod_delayed_work(system_percpu_wq, &sr_data->entry_work, msecs_to_jiffies(delay)); } } From f2edbafc6cb905caec2b231e8e2226b4c75daad9 Mon Sep 17 00:00:00 2001 From: Icenowy Zheng Date: Sat, 7 Feb 2026 09:32:55 +0800 Subject: [PATCH 055/158] drm: verisilicon: suppress snprintf warning for pixel clock name Although it's generally expected that the pixel clock ID will only have one decimal digit, this isn't enforced in vs_dc.c source code, and the compiler will argue about the buffer being not long enough. Enlarge the snprintf() buffer for generating pixel clock name to be enough for a UINT_MAX pixel clock ID in order to suppress the compiler warning. 
Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202602060154.ONBYvM9m-lkp@intel.com/ Signed-off-by: Icenowy Zheng Reviewed-by: Thomas Zimmermann Signed-off-by: Thomas Zimmermann Link: https://patch.msgid.link/20260207013255.2075294-1-zhengxingda@iscas.ac.cn --- drivers/gpu/drm/verisilicon/vs_dc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/verisilicon/vs_dc.c b/drivers/gpu/drm/verisilicon/vs_dc.c index ba1b3f261a3a..5f629d2d4bea 100644 --- a/drivers/gpu/drm/verisilicon/vs_dc.c +++ b/drivers/gpu/drm/verisilicon/vs_dc.c @@ -46,8 +46,8 @@ static int vs_dc_probe(struct platform_device *pdev) struct vs_dc *dc; void __iomem *regs; unsigned int port_count, i; - /* pix0/pix1 */ - char pixclk_name[5]; + /* pix%u */ + char pixclk_name[14]; int irq, ret; if (!dev->of_node) { From 62918542b7bf08860a60ebbde7654486e0ac0776 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 16 Jun 2025 16:59:52 +0100 Subject: [PATCH 056/158] dma-fence: Fix sparse warnings due __rcu annotations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit __rcu annotations on the return types from dma_fence_driver_name() and dma_fence_timeline_name() cause sparse to complain because both the constant signaled strings, and the strings return by the dma_fence_ops are not __rcu annotated. For a simple fix it is easiest to cast them with __rcu added and undo the smarts from the tracpoints side of things. There is no functional change since the rest is left in place. Later we can consider changing the dma_fence_ops return types too, and handle all the individual drivers which define them. 
Signed-off-by: Tvrtko Ursulin Fixes: 506aa8b02a8d ("dma-fence: Add safe access helpers and document the rules") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202506162214.1eA69hLe-lkp@intel.com/ Reviewed-by: Christian König Link: https://lore.kernel.org/r/20250616155952.24259-1-tvrtko.ursulin@igalia.com Signed-off-by: Christian König --- drivers/dma-buf/dma-fence.c | 8 ++++---- include/trace/events/dma_fence.h | 35 +++++--------------------------- 2 files changed, 9 insertions(+), 34 deletions(-) diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index c9a036b0d592..e05beae6e407 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -1133,9 +1133,9 @@ const char __rcu *dma_fence_driver_name(struct dma_fence *fence) "RCU protection is required for safe access to returned string"); if (!dma_fence_test_signaled_flag(fence)) - return fence->ops->get_driver_name(fence); + return (const char __rcu *)fence->ops->get_driver_name(fence); else - return "detached-driver"; + return (const char __rcu *)"detached-driver"; } EXPORT_SYMBOL(dma_fence_driver_name); @@ -1165,8 +1165,8 @@ const char __rcu *dma_fence_timeline_name(struct dma_fence *fence) "RCU protection is required for safe access to returned string"); if (!dma_fence_test_signaled_flag(fence)) - return fence->ops->get_timeline_name(fence); + return (const char __rcu *)fence->ops->get_timeline_name(fence); else - return "signaled-timeline"; + return (const char __rcu *)"signaled-timeline"; } EXPORT_SYMBOL(dma_fence_timeline_name); diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h index 4814a65b68dc..3abba45c0601 100644 --- a/include/trace/events/dma_fence.h +++ b/include/trace/events/dma_fence.h @@ -9,37 +9,12 @@ struct dma_fence; -DECLARE_EVENT_CLASS(dma_fence, - - TP_PROTO(struct dma_fence *fence), - - TP_ARGS(fence), - - TP_STRUCT__entry( - __string(driver, dma_fence_driver_name(fence)) - __string(timeline, 
dma_fence_timeline_name(fence)) - __field(unsigned int, context) - __field(unsigned int, seqno) - ), - - TP_fast_assign( - __assign_str(driver); - __assign_str(timeline); - __entry->context = fence->context; - __entry->seqno = fence->seqno; - ), - - TP_printk("driver=%s timeline=%s context=%u seqno=%u", - __get_str(driver), __get_str(timeline), __entry->context, - __entry->seqno) -); - /* * Safe only for call sites which are guaranteed to not race with fence * signaling,holding the fence->lock and having checked for not signaled, or the * signaling path itself. */ -DECLARE_EVENT_CLASS(dma_fence_unsignaled, +DECLARE_EVENT_CLASS(dma_fence, TP_PROTO(struct dma_fence *fence), @@ -64,14 +39,14 @@ DECLARE_EVENT_CLASS(dma_fence_unsignaled, __entry->seqno) ); -DEFINE_EVENT(dma_fence_unsignaled, dma_fence_emit, +DEFINE_EVENT(dma_fence, dma_fence_emit, TP_PROTO(struct dma_fence *fence), TP_ARGS(fence) ); -DEFINE_EVENT(dma_fence_unsignaled, dma_fence_init, +DEFINE_EVENT(dma_fence, dma_fence_init, TP_PROTO(struct dma_fence *fence), @@ -85,14 +60,14 @@ DEFINE_EVENT(dma_fence, dma_fence_destroy, TP_ARGS(fence) ); -DEFINE_EVENT(dma_fence_unsignaled, dma_fence_enable_signal, +DEFINE_EVENT(dma_fence, dma_fence_enable_signal, TP_PROTO(struct dma_fence *fence), TP_ARGS(fence) ); -DEFINE_EVENT(dma_fence_unsignaled, dma_fence_signaled, +DEFINE_EVENT(dma_fence, dma_fence_signaled, TP_PROTO(struct dma_fence *fence), From 310326bb7df4bba094a3fc60364c641c547fd923 Mon Sep 17 00:00:00 2001 From: Li Chen Date: Wed, 21 Jan 2026 19:36:44 +0800 Subject: [PATCH 057/158] nouveau: pci: quiesce GPU on shutdown Kexec reboot does not reset PCI devices. Invoking the full DRM/TTM teardown from ->shutdown can trigger WARNs when userspace still holds DRM file descriptors. Quiesce the GPU through the suspend path and then power down the PCI function so the next kernel can re-initialize the device from a consistent state. 
WARNING: drivers/gpu/drm/drm_mode_config.c:578 at drm_mode_config_cleanup+0x2e7/0x300, CPU#2: kexec/1300 Call Trace: ? srso_return_thunk+0x5/0x5f ? enable_work+0x3a/0x100 nouveau_display_destroy+0x39/0x70 [nouveau c19e0da7fd83583a023f855c510d9a3903808734] nouveau_drm_device_fini+0x7b/0x1f0 [nouveau c19e0da7fd83583a023f855c510d9a3903808734] nouveau_drm_shutdown+0x52/0xc0 [nouveau c19e0da7fd83583a023f855c510d9a3903808734] pci_device_shutdown+0x35/0x60 device_shutdown+0x11c/0x1b0 kernel_kexec+0x13a/0x160 __do_sys_reboot+0x209/0x240 do_syscall_64+0x81/0x610 ? srso_return_thunk+0x5/0x5f ? __rtnl_unlock+0x37/0x70 ? srso_return_thunk+0x5/0x5f ? netdev_run_todo+0x63/0x570 ? netif_change_flags+0x54/0x70 ? srso_return_thunk+0x5/0x5f ? devinet_ioctl+0x1e5/0x790 ? srso_return_thunk+0x5/0x5f ? inet_ioctl+0x1e9/0x200 ? srso_return_thunk+0x5/0x5f ? srso_return_thunk+0x5/0x5f ? sock_do_ioctl+0x7d/0x130 ? srso_return_thunk+0x5/0x5f ? __x64_sys_ioctl+0x97/0xe0 ? srso_return_thunk+0x5/0x5f ? srso_return_thunk+0x5/0x5f ? do_syscall_64+0x23b/0x610 ? srso_return_thunk+0x5/0x5f ? put_user_ifreq+0x7a/0x90 ? srso_return_thunk+0x5/0x5f ? sock_do_ioctl+0x107/0x130 ? srso_return_thunk+0x5/0x5f ? __x64_sys_ioctl+0x97/0xe0 ? srso_return_thunk+0x5/0x5f ? do_syscall_64+0x81/0x610 ? srso_return_thunk+0x5/0x5f ? exc_page_fault+0x7e/0x1a0 entry_SYSCALL_64_after_hwframe+0x76/0x7e nouveau 0000:26:00.0: [drm] drm_WARN_ON(!list_empty(&fb->filp_head)) WARNING: drivers/gpu/drm/drm_framebuffer.c:833 at drm_framebuffer_free+0x73/0xa0, CPU#2: kexec/1300 Call Trace: drm_mode_config_cleanup+0x248/0x300 ? __pfx___drm_printfn_dbg+0x10/0x10 ? 
drm_mode_config_cleanup+0x1dc/0x300 nouveau_display_destroy+0x39/0x70 [nouveau c19e0da7fd83583a023f855c510d9a3903808734] nouveau_drm_device_fini+0x7b/0x1f0 [nouveau c19e0da7fd83583a023f855c510d9a3903808734] nouveau_drm_shutdown+0x52/0xc0 [nouveau c19e0da7fd83583a023f855c510d9a3903808734] pci_device_shutdown+0x35/0x60 device_shutdown+0x11c/0x1b0 kernel_kexec+0x13a/0x160 __do_sys_reboot+0x209/0x240 do_syscall_64+0x81/0x610 ? srso_return_thunk+0x5/0x5f ? __rtnl_unlock+0x37/0x70 ? srso_return_thunk+0x5/0x5f ? netdev_run_todo+0x63/0x570 ? netif_change_flags+0x54/0x70 ? srso_return_thunk+0x5/0x5f ? devinet_ioctl+0x1e5/0x790 ? srso_return_thunk+0x5/0x5f ? inet_ioctl+0x1e9/0x200 ? srso_return_thunk+0x5/0x5f ? srso_return_thunk+0x5/0x5f ? sock_do_ioctl+0x7d/0x130 ? srso_return_thunk+0x5/0x5f ? __x64_sys_ioctl+0x97/0xe0 ? srso_return_thunk+0x5/0x5f ? srso_return_thunk+0x5/0x5f ? do_syscall_64+0x23b/0x610 ? srso_return_thunk+0x5/0x5f ? put_user_ifreq+0x7a/0x90 ? srso_return_thunk+0x5/0x5f ? sock_do_ioctl+0x107/0x130 ? srso_return_thunk+0x5/0x5f ? __x64_sys_ioctl+0x97/0xe0 ? srso_return_thunk+0x5/0x5f ? do_syscall_64+0x81/0x610 ? srso_return_thunk+0x5/0x5f ? exc_page_fault+0x7e/0x1a0 entry_SYSCALL_64_after_hwframe+0x76/0x7e WARNING: include/drm/ttm/ttm_resource.h:406 at nouveau_ttm_fini+0x257/0x270 [nouveau], CPU#2: kexec/1300 Call Trace: nouveau_drm_device_fini+0x93/0x1f0 [nouveau c19e0da7fd83583a023f855c510d9a3903808734] nouveau_drm_shutdown+0x52/0xc0 [nouveau c19e0da7fd83583a023f855c510d9a3903808734] pci_device_shutdown+0x35/0x60 device_shutdown+0x11c/0x1b0 kernel_kexec+0x13a/0x160 __do_sys_reboot+0x209/0x240 do_syscall_64+0x81/0x610 ? srso_return_thunk+0x5/0x5f ? __rtnl_unlock+0x37/0x70 ? srso_return_thunk+0x5/0x5f ? netdev_run_todo+0x63/0x570 ? netif_change_flags+0x54/0x70 ? srso_return_thunk+0x5/0x5f ? devinet_ioctl+0x1e5/0x790 ? srso_return_thunk+0x5/0x5f ? inet_ioctl+0x1e9/0x200 ? srso_return_thunk+0x5/0x5f ? srso_return_thunk+0x5/0x5f ? sock_do_ioctl+0x7d/0x130 ? 
srso_return_thunk+0x5/0x5f ? __x64_sys_ioctl+0x97/0xe0 ? srso_return_thunk+0x5/0x5f ? srso_return_thunk+0x5/0x5f ? do_syscall_64+0x23b/0x610 ? srso_return_thunk+0x5/0x5f ? put_user_ifreq+0x7a/0x90 ? srso_return_thunk+0x5/0x5f ? sock_do_ioctl+0x107/0x130 ? srso_return_thunk+0x5/0x5f ? __x64_sys_ioctl+0x97/0xe0 ? srso_return_thunk+0x5/0x5f ? do_syscall_64+0x81/0x610 ? srso_return_thunk+0x5/0x5f ? exc_page_fault+0x7e/0x1a0 entry_SYSCALL_64_after_hwframe+0x76/0x7e Signed-off-by: Li Chen Reviewed-by: Dave Airlie Signed-off-by: Dave Airlie Link: https://patch.msgid.link/20260121113646.111561-1-me@linux.beauty --- drivers/gpu/drm/nouveau/nouveau_drm.c | 32 +++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 1527b801f013..f2e04a048ac2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1079,6 +1079,37 @@ nouveau_pmops_resume(struct device *dev) return ret; } +static void +nouveau_drm_shutdown(struct pci_dev *pdev) +{ + struct nouveau_drm *drm = pci_get_drvdata(pdev); + int ret; + + if (!drm) + return; + + if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF || + drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF) + return; + + ret = nouveau_do_suspend(drm, false); + if (ret) + NV_ERROR(drm, "shutdown suspend failed with: %d\n", ret); + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + /* + * This is just to give the pci power transition time to settle + * before an immediate kexec jump. it’s mirroring the existing + * nouveau_pmops_suspend() behavior, which already does + * udelay(200) right after pci_set_power_state(..., pci_d3hot). In + * ->shutdown() we’re allowed to sleep, so I used usleep_range() + * instead of a busy-wait udelay(). 
+ */ + usleep_range(200, 400); +} + static int nouveau_pmops_freeze(struct device *dev) { @@ -1408,6 +1439,7 @@ nouveau_drm_pci_driver = { .id_table = nouveau_drm_pci_table, .probe = nouveau_drm_probe, .remove = nouveau_drm_remove, + .shutdown = nouveau_drm_shutdown, .driver.pm = &nouveau_pm_ops, }; From 24a4241995ab7456c6751e0bd63382a95e70757f Mon Sep 17 00:00:00 2001 From: Chaitanya Kumar Borah Date: Mon, 2 Feb 2026 15:11:54 +0530 Subject: [PATCH 058/158] drm/colorop: Add destroy helper for colorop objects Add a helper that performs common cleanup and frees the associated object. This can be used by drivers if they do not require any driver-specific teardown. v2: - Add function documentation only before definition (Jani) Signed-off-by: Chaitanya Kumar Borah Reviewed-by: Suraj Kandpal Reviewed-by: Uma Shankar Reviewed-by: Alex Hung Acked-by: Jani Nikula Signed-off-by: Suraj Kandpal Link: https://patch.msgid.link/20260202094202.2871478-2-chaitanya.kumar.borah@intel.com --- drivers/gpu/drm/drm_colorop.c | 15 +++++++++++++++ include/drm/drm_colorop.h | 2 ++ 2 files changed, 17 insertions(+) diff --git a/drivers/gpu/drm/drm_colorop.c b/drivers/gpu/drm/drm_colorop.c index 44eb823585d2..c226870fde9e 100644 --- a/drivers/gpu/drm/drm_colorop.c +++ b/drivers/gpu/drm/drm_colorop.c @@ -178,6 +178,21 @@ void drm_colorop_cleanup(struct drm_colorop *colorop) } EXPORT_SYMBOL(drm_colorop_cleanup); +/** + * drm_colorop_destroy - destroy colorop + * @colorop: drm colorop + * + * Destroys @colorop by performing common DRM cleanup and freeing the + * colorop object. This can be used by drivers if they do not + * require any driver-specific teardown. 
+ */ +void drm_colorop_destroy(struct drm_colorop *colorop) +{ + drm_colorop_cleanup(colorop); + kfree(colorop); +} +EXPORT_SYMBOL(drm_colorop_destroy); + /** * drm_colorop_pipeline_destroy - Helper for color pipeline destruction * diff --git a/include/drm/drm_colorop.h b/include/drm/drm_colorop.h index a3a32f9f918c..3056f3f02597 100644 --- a/include/drm/drm_colorop.h +++ b/include/drm/drm_colorop.h @@ -420,6 +420,8 @@ void drm_colorop_atomic_destroy_state(struct drm_colorop *colorop, */ void drm_colorop_reset(struct drm_colorop *colorop); +void drm_colorop_destroy(struct drm_colorop *colorop); + /** * drm_colorop_index - find the index of a registered colorop * @colorop: colorop to find index for From 2864667476a40525511a1e854bcfa7c90392a990 Mon Sep 17 00:00:00 2001 From: Chaitanya Kumar Borah Date: Mon, 2 Feb 2026 15:11:55 +0530 Subject: [PATCH 059/158] drm: Allow driver-managed destruction of colorop objects Some drivers might want to embed struct drm_colorop inside driver-specific objects, similar to planes or CRTCs. In such cases, freeing only the drm_colorop is incorrect. Add a drm_colorop_funcs callback to allow drivers to provide a destroy hook that cleans up the full enclosing object. Make changes in helper functions to accept helper functions as argument. Pass NULL for now to retain current behavior. 
Signed-off-by: Chaitanya Kumar Borah Reviewed-by: Suraj Kandpal Reviewed-by: Uma Shankar Reviewed-by: Alex Hung Acked-by: Jani Nikula Signed-off-by: Suraj Kandpal Link: https://patch.msgid.link/20260202094202.2871478-3-chaitanya.kumar.borah@intel.com --- .../amd/display/amdgpu_dm/amdgpu_dm_colorop.c | 18 ++++++----- drivers/gpu/drm/drm_colorop.c | 31 +++++++++++++------ .../drm/i915/display/intel_color_pipeline.c | 8 ++--- drivers/gpu/drm/vkms/vkms_colorop.c | 10 +++--- include/drm/drm_colorop.h | 30 +++++++++++++++--- 5 files changed, 66 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c index a2de3bba8346..dfdb4fb4219f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c @@ -72,7 +72,7 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, NULL, amdgpu_dm_supported_degam_tfs, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) @@ -89,7 +89,7 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_mult_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); + ret = drm_plane_colorop_mult_init(dev, ops[i], plane, NULL, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) goto cleanup; @@ -104,7 +104,8 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, NULL, + DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) goto cleanup; @@ -120,7 +121,7 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_curve_1d_init(dev, 
ops[i], plane, + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, NULL, amdgpu_dm_supported_shaper_tfs, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) @@ -137,7 +138,8 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES, + ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, NULL, + MAX_COLOR_LUT_ENTRIES, DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) @@ -154,7 +156,7 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane, LUT3D_SIZE, + ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane, NULL, LUT3D_SIZE, DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) @@ -172,7 +174,7 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, NULL, amdgpu_dm_supported_blnd_tfs, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) @@ -189,7 +191,7 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES, + ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, NULL, MAX_COLOR_LUT_ENTRIES, DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) diff --git a/drivers/gpu/drm/drm_colorop.c b/drivers/gpu/drm/drm_colorop.c index c226870fde9e..2bce29176ab3 100644 --- a/drivers/gpu/drm/drm_colorop.c +++ b/drivers/gpu/drm/drm_colorop.c @@ -93,7 +93,8 @@ static const struct drm_prop_enum_list drm_colorop_lut3d_interpolation_list[] = /* Init Helpers */ static int drm_plane_colorop_init(struct drm_device *dev, struct drm_colorop *colorop, - struct drm_plane *plane, enum 
drm_colorop_type type, + struct drm_plane *plane, const struct drm_colorop_funcs *funcs, + enum drm_colorop_type type, uint32_t flags) { struct drm_mode_config *config = &dev->mode_config; @@ -109,6 +110,7 @@ static int drm_plane_colorop_init(struct drm_device *dev, struct drm_colorop *co colorop->type = type; colorop->plane = plane; colorop->next = NULL; + colorop->funcs = funcs; list_add_tail(&colorop->head, &config->colorop_list); colorop->index = config->num_colorop++; @@ -218,6 +220,7 @@ EXPORT_SYMBOL(drm_colorop_pipeline_destroy); * @dev: DRM device * @colorop: The drm_colorop object to initialize * @plane: The associated drm_plane + * @funcs: control functions for the new colorop * @supported_tfs: A bitfield of supported drm_plane_colorop_curve_1d_init enum values, * created using BIT(curve_type) and combined with the OR '|' * operator. @@ -225,7 +228,8 @@ EXPORT_SYMBOL(drm_colorop_pipeline_destroy); * @return zero on success, -E value on failure */ int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *colorop, - struct drm_plane *plane, u64 supported_tfs, uint32_t flags) + struct drm_plane *plane, const struct drm_colorop_funcs *funcs, + u64 supported_tfs, uint32_t flags) { struct drm_prop_enum_list enum_list[DRM_COLOROP_1D_CURVE_COUNT]; int i, len; @@ -246,7 +250,7 @@ int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop * return -EINVAL; } - ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_CURVE, flags); + ret = drm_plane_colorop_init(dev, colorop, plane, funcs, DRM_COLOROP_1D_CURVE, flags); if (ret) return ret; @@ -303,20 +307,23 @@ static int drm_colorop_create_data_prop(struct drm_device *dev, struct drm_color * @dev: DRM device * @colorop: The drm_colorop object to initialize * @plane: The associated drm_plane + * @funcs: control functions for new colorop * @lut_size: LUT size supported by driver * @interpolation: 1D LUT interpolation type * @flags: bitmask of misc, see 
DRM_COLOROP_FLAG_* defines. * @return zero on success, -E value on failure */ int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_colorop *colorop, - struct drm_plane *plane, uint32_t lut_size, + struct drm_plane *plane, + const struct drm_colorop_funcs *funcs, + uint32_t lut_size, enum drm_colorop_lut1d_interpolation_type interpolation, uint32_t flags) { struct drm_property *prop; int ret; - ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_LUT, flags); + ret = drm_plane_colorop_init(dev, colorop, plane, funcs, DRM_COLOROP_1D_LUT, flags); if (ret) return ret; @@ -354,11 +361,12 @@ int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_color EXPORT_SYMBOL(drm_plane_colorop_curve_1d_lut_init); int drm_plane_colorop_ctm_3x4_init(struct drm_device *dev, struct drm_colorop *colorop, - struct drm_plane *plane, uint32_t flags) + struct drm_plane *plane, const struct drm_colorop_funcs *funcs, + uint32_t flags) { int ret; - ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_CTM_3X4, flags); + ret = drm_plane_colorop_init(dev, colorop, plane, funcs, DRM_COLOROP_CTM_3X4, flags); if (ret) return ret; @@ -378,16 +386,18 @@ EXPORT_SYMBOL(drm_plane_colorop_ctm_3x4_init); * @dev: DRM device * @colorop: The drm_colorop object to initialize * @plane: The associated drm_plane + * @funcs: control functions for the new colorop * @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines. 
* @return zero on success, -E value on failure */ int drm_plane_colorop_mult_init(struct drm_device *dev, struct drm_colorop *colorop, - struct drm_plane *plane, uint32_t flags) + struct drm_plane *plane, const struct drm_colorop_funcs *funcs, + uint32_t flags) { struct drm_property *prop; int ret; - ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_MULTIPLIER, flags); + ret = drm_plane_colorop_init(dev, colorop, plane, funcs, DRM_COLOROP_MULTIPLIER, flags); if (ret) return ret; @@ -406,6 +416,7 @@ EXPORT_SYMBOL(drm_plane_colorop_mult_init); int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *colorop, struct drm_plane *plane, + const struct drm_colorop_funcs *funcs, uint32_t lut_size, enum drm_colorop_lut3d_interpolation_type interpolation, uint32_t flags) @@ -413,7 +424,7 @@ int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *col struct drm_property *prop; int ret; - ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_3D_LUT, flags); + ret = drm_plane_colorop_init(dev, colorop, plane, funcs, DRM_COLOROP_3D_LUT, flags); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_color_pipeline.c b/drivers/gpu/drm/i915/display/intel_color_pipeline.c index 04af552b3648..d3d73d60727c 100644 --- a/drivers/gpu/drm/i915/display/intel_color_pipeline.c +++ b/drivers/gpu/drm/i915/display/intel_color_pipeline.c @@ -25,7 +25,7 @@ int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_en colorop = intel_colorop_create(INTEL_PLANE_CB_PRE_CSC_LUT); - ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane, + ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane, NULL, PLANE_DEGAMMA_SIZE, DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, DRM_COLOROP_FLAG_ALLOW_BYPASS); @@ -39,7 +39,7 @@ int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_en prev_op = &colorop->base; colorop = intel_colorop_create(INTEL_PLANE_CB_CSC); - ret = 
drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane, + ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane, NULL, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) return ret; @@ -52,7 +52,7 @@ int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_en plane->type == DRM_PLANE_TYPE_PRIMARY) { colorop = intel_colorop_create(INTEL_PLANE_CB_3DLUT); - ret = drm_plane_colorop_3dlut_init(dev, &colorop->base, plane, 17, + ret = drm_plane_colorop_3dlut_init(dev, &colorop->base, plane, NULL, 17, DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, true); if (ret) @@ -64,7 +64,7 @@ int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_en } colorop = intel_colorop_create(INTEL_PLANE_CB_POST_CSC_LUT); - ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane, + ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane, NULL, PLANE_GAMMA_SIZE, DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, DRM_COLOROP_FLAG_ALLOW_BYPASS); diff --git a/drivers/gpu/drm/vkms/vkms_colorop.c b/drivers/gpu/drm/vkms/vkms_colorop.c index d03a1f2e9c41..9e9dd0494628 100644 --- a/drivers/gpu/drm/vkms/vkms_colorop.c +++ b/drivers/gpu/drm/vkms/vkms_colorop.c @@ -31,7 +31,7 @@ static int vkms_initialize_color_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, supported_tfs, + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, NULL, supported_tfs, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) goto cleanup; @@ -48,7 +48,8 @@ static int vkms_initialize_color_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, NULL, + DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) goto cleanup; @@ -64,7 +65,8 @@ static int vkms_initialize_color_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = 
drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, NULL, + DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) goto cleanup; @@ -80,7 +82,7 @@ static int vkms_initialize_color_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, supported_tfs, + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, NULL, supported_tfs, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) goto cleanup; diff --git a/include/drm/drm_colorop.h b/include/drm/drm_colorop.h index 3056f3f02597..bd082854ca74 100644 --- a/include/drm/drm_colorop.h +++ b/include/drm/drm_colorop.h @@ -187,6 +187,19 @@ struct drm_colorop_state { struct drm_atomic_state *state; }; +/** + * struct drm_colorop_funcs - driver colorop control functions + */ +struct drm_colorop_funcs { + /** + * @destroy: + * + * Clean up colorop resources. This is called at driver unload time + * through drm_mode_config_cleanup() + */ + void (*destroy)(struct drm_colorop *colorop); +}; + /** * struct drm_colorop - DRM color operation control structure * @@ -362,6 +375,8 @@ struct drm_colorop { */ struct drm_property *next_property; + /** @funcs: colorop control functions */ + const struct drm_colorop_funcs *funcs; }; #define obj_to_colorop(x) container_of(x, struct drm_colorop, base) @@ -390,17 +405,22 @@ void drm_colorop_pipeline_destroy(struct drm_device *dev); void drm_colorop_cleanup(struct drm_colorop *colorop); int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *colorop, - struct drm_plane *plane, u64 supported_tfs, uint32_t flags); + struct drm_plane *plane, const struct drm_colorop_funcs *funcs, + u64 supported_tfs, uint32_t flags); int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_colorop *colorop, - struct drm_plane *plane, uint32_t lut_size, + struct drm_plane *plane, + const struct drm_colorop_funcs *funcs, + uint32_t lut_size, 
enum drm_colorop_lut1d_interpolation_type interpolation, uint32_t flags); int drm_plane_colorop_ctm_3x4_init(struct drm_device *dev, struct drm_colorop *colorop, - struct drm_plane *plane, uint32_t flags); + struct drm_plane *plane, const struct drm_colorop_funcs *funcs, + uint32_t flags); int drm_plane_colorop_mult_init(struct drm_device *dev, struct drm_colorop *colorop, - struct drm_plane *plane, uint32_t flags); + struct drm_plane *plane, const struct drm_colorop_funcs *funcs, + uint32_t flags); int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *colorop, - struct drm_plane *plane, + struct drm_plane *plane, const struct drm_colorop_funcs *funcs, uint32_t lut_size, enum drm_colorop_lut3d_interpolation_type interpolation, uint32_t flags); From 3f85dd9b8a2a2ea1599e4e16f57aeb693a059c55 Mon Sep 17 00:00:00 2001 From: Chaitanya Kumar Borah Date: Mon, 2 Feb 2026 15:11:56 +0530 Subject: [PATCH 060/158] drm/amd/display: Hook up colorop destroy helper for plane pipelines Provide a drm_colorop_funcs instance for amdgpu_dm color pipeline objects and hook up the common drm_colorop_destroy() helper as the destroy callback. 
Signed-off-by: Chaitanya Kumar Borah Reviewed-by: Uma Shankar Reviewed-by: Alex Hung Acked-by: Jani Nikula Signed-off-by: Suraj Kandpal Link: https://patch.msgid.link/20260202094202.2871478-4-chaitanya.kumar.borah@intel.com --- .../amd/display/amdgpu_dm/amdgpu_dm_colorop.c | 25 +++++++++++++------ 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c index dfdb4fb4219f..5130962193d9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c @@ -55,6 +55,10 @@ const u64 amdgpu_dm_supported_blnd_tfs = #define LUT3D_SIZE 17 +static const struct drm_colorop_funcs dm_colorop_funcs = { + .destroy = drm_colorop_destroy, +}; + int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list) { struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS]; @@ -72,7 +76,7 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, NULL, + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, &dm_colorop_funcs, amdgpu_dm_supported_degam_tfs, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) @@ -89,7 +93,8 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_mult_init(dev, ops[i], plane, NULL, DRM_COLOROP_FLAG_ALLOW_BYPASS); + ret = drm_plane_colorop_mult_init(dev, ops[i], plane, &dm_colorop_funcs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) goto cleanup; @@ -104,7 +109,8 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, NULL, + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, + &dm_colorop_funcs, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) goto cleanup; @@ -121,7 +127,7 @@ int 
amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, NULL, + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, &dm_colorop_funcs, amdgpu_dm_supported_shaper_tfs, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) @@ -138,7 +144,8 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, NULL, + ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, + &dm_colorop_funcs, MAX_COLOR_LUT_ENTRIES, DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, DRM_COLOROP_FLAG_ALLOW_BYPASS); @@ -156,7 +163,8 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane, NULL, LUT3D_SIZE, + ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane, + &dm_colorop_funcs, LUT3D_SIZE, DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) @@ -174,7 +182,7 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, NULL, + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, &dm_colorop_funcs, amdgpu_dm_supported_blnd_tfs, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) @@ -191,7 +199,8 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, NULL, MAX_COLOR_LUT_ENTRIES, + ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, &dm_colorop_funcs, + MAX_COLOR_LUT_ENTRIES, DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) From f3d51fbe48e0f94a1981fbe3c5121dcac58c4f98 Mon Sep 17 00:00:00 2001 From: Chaitanya Kumar Borah Date: Mon, 2 Feb 2026 15:11:57 +0530 Subject: [PATCH 061/158] drm/vkms: Hook up colorop destroy helper for plane pipelines 
Provide a drm_colorop_funcs instance for vkms color pipeline objects and hook up the common drm_colorop_destroy() helper as the destroy callback. Signed-off-by: Chaitanya Kumar Borah Reviewed-by: Uma Shankar Reviewed-by: Alex Hung Reviewed-by: Louis Chauvet Acked-by: Jani Nikula Signed-off-by: Suraj Kandpal Link: https://patch.msgid.link/20260202094202.2871478-5-chaitanya.kumar.borah@intel.com --- drivers/gpu/drm/vkms/vkms_colorop.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/vkms/vkms_colorop.c b/drivers/gpu/drm/vkms/vkms_colorop.c index 9e9dd0494628..ba826ad384b7 100644 --- a/drivers/gpu/drm/vkms/vkms_colorop.c +++ b/drivers/gpu/drm/vkms/vkms_colorop.c @@ -12,6 +12,10 @@ static const u64 supported_tfs = BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) | BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF); +static const struct drm_colorop_funcs vkms_colorop_funcs = { + .destroy = drm_colorop_destroy, +}; + #define MAX_COLOR_PIPELINE_OPS 4 static int vkms_initialize_color_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list) @@ -31,7 +35,8 @@ static int vkms_initialize_color_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, NULL, supported_tfs, + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, &vkms_colorop_funcs, + supported_tfs, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) goto cleanup; @@ -48,7 +53,7 @@ static int vkms_initialize_color_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, NULL, + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, &vkms_colorop_funcs, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) goto cleanup; @@ -65,7 +70,7 @@ static int vkms_initialize_color_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, NULL, + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, 
&vkms_colorop_funcs, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) goto cleanup; @@ -82,7 +87,8 @@ static int vkms_initialize_color_pipeline(struct drm_plane *plane, struct drm_pr goto cleanup; } - ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, NULL, supported_tfs, + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, &vkms_colorop_funcs, + supported_tfs, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) goto cleanup; From a6e1c068be198ae37af478ccd852bd1a15abc8ed Mon Sep 17 00:00:00 2001 From: Chaitanya Kumar Borah Date: Mon, 2 Feb 2026 15:11:58 +0530 Subject: [PATCH 062/158] drm/i915/display: Hook up intel_colorop_destroy i915 embeds struct drm_colorop inside struct intel_colorop, so the default drm_colorop_destroy() helper cannot be used. Add an intel_colorop_destroy() helper that performs common DRM cleanup and frees intel_colorop object. This ensures correct teardown of plane color pipeline objects. Signed-off-by: Chaitanya Kumar Borah Reviewed-by: Suraj Kandpal Reviewed-by: Uma Shankar Acked-by: Jani Nikula Signed-off-by: Suraj Kandpal Link: https://patch.msgid.link/20260202094202.2871478-6-chaitanya.kumar.borah@intel.com --- drivers/gpu/drm/i915/display/intel_color_pipeline.c | 13 +++++++++---- drivers/gpu/drm/i915/display/intel_colorop.c | 6 ++++++ drivers/gpu/drm/i915/display/intel_colorop.h | 1 + 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_color_pipeline.c b/drivers/gpu/drm/i915/display/intel_color_pipeline.c index d3d73d60727c..8fecc53540ba 100644 --- a/drivers/gpu/drm/i915/display/intel_color_pipeline.c +++ b/drivers/gpu/drm/i915/display/intel_color_pipeline.c @@ -13,6 +13,10 @@ #define PLANE_DEGAMMA_SIZE 128 #define PLANE_GAMMA_SIZE 32 +static const struct drm_colorop_funcs intel_colorop_funcs = { + .destroy = intel_colorop_destroy, +}; + static int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_enum_list *list, enum pipe pipe) @@ -25,7 +29,7 @@ int 
_intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_en colorop = intel_colorop_create(INTEL_PLANE_CB_PRE_CSC_LUT); - ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane, NULL, + ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane, &intel_colorop_funcs, PLANE_DEGAMMA_SIZE, DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, DRM_COLOROP_FLAG_ALLOW_BYPASS); @@ -39,7 +43,7 @@ int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_en prev_op = &colorop->base; colorop = intel_colorop_create(INTEL_PLANE_CB_CSC); - ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane, NULL, + ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane, &intel_colorop_funcs, DRM_COLOROP_FLAG_ALLOW_BYPASS); if (ret) return ret; @@ -52,7 +56,8 @@ int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_en plane->type == DRM_PLANE_TYPE_PRIMARY) { colorop = intel_colorop_create(INTEL_PLANE_CB_3DLUT); - ret = drm_plane_colorop_3dlut_init(dev, &colorop->base, plane, NULL, 17, + ret = drm_plane_colorop_3dlut_init(dev, &colorop->base, plane, + &intel_colorop_funcs, 17, DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, true); if (ret) @@ -64,7 +69,7 @@ int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_en } colorop = intel_colorop_create(INTEL_PLANE_CB_POST_CSC_LUT); - ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane, NULL, + ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane, &intel_colorop_funcs, PLANE_GAMMA_SIZE, DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, DRM_COLOROP_FLAG_ALLOW_BYPASS); diff --git a/drivers/gpu/drm/i915/display/intel_colorop.c b/drivers/gpu/drm/i915/display/intel_colorop.c index 1d84933f05aa..9e54f51cfad8 100644 --- a/drivers/gpu/drm/i915/display/intel_colorop.c +++ b/drivers/gpu/drm/i915/display/intel_colorop.c @@ -35,3 +35,9 @@ struct intel_colorop *intel_colorop_create(enum intel_color_block id) return colorop; } + 
+void intel_colorop_destroy(struct drm_colorop *colorop) +{ + drm_colorop_cleanup(colorop); + kfree(to_intel_colorop(colorop)); +} diff --git a/drivers/gpu/drm/i915/display/intel_colorop.h b/drivers/gpu/drm/i915/display/intel_colorop.h index 9276eee6e75a..638baf67d98d 100644 --- a/drivers/gpu/drm/i915/display/intel_colorop.h +++ b/drivers/gpu/drm/i915/display/intel_colorop.h @@ -13,5 +13,6 @@ struct intel_colorop; struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop); struct intel_colorop *intel_colorop_alloc(void); struct intel_colorop *intel_colorop_create(enum intel_color_block id); +void intel_colorop_destroy(struct drm_colorop *colorop); #endif /* __INTEL_COLOROP_H__ */ From fa15259eb65944551ad52988e5822dca0a21b090 Mon Sep 17 00:00:00 2001 From: Chaitanya Kumar Borah Date: Mon, 2 Feb 2026 15:11:59 +0530 Subject: [PATCH 063/158] drm: Clean up colorop objects during mode_config cleanup Tear down all registered drm_colorop objects during drm_mode_config_cleanup() by invoking their destroy callbacks. This ensures proper cleanup of color pipeline objects during DRM device removal. 
Signed-off-by: Chaitanya Kumar Borah Reviewed-by: Suraj Kandpal Reviewed-by: Uma Shankar Reviewed-by: Alex Hung Acked-by: Jani Nikula Signed-off-by: Suraj Kandpal Link: https://patch.msgid.link/20260202094202.2871478-7-chaitanya.kumar.borah@intel.com --- drivers/gpu/drm/drm_mode_config.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index d12db9b0bab8..84ae8a23a367 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -524,6 +524,7 @@ void drm_mode_config_cleanup(struct drm_device *dev) struct drm_property *property, *pt; struct drm_property_blob *blob, *bt; struct drm_plane *plane, *plt; + struct drm_colorop *colorop, *copt; list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list, head) { @@ -553,6 +554,11 @@ void drm_mode_config_cleanup(struct drm_device *dev) drm_property_destroy(dev, property); } + list_for_each_entry_safe(colorop, copt, &dev->mode_config.colorop_list, + head) { + colorop->funcs->destroy(colorop); + } + list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list, head) { plane->funcs->destroy(plane); From fe057ba431868a926fa54f28a0d2891aea6f0584 Mon Sep 17 00:00:00 2001 From: Chaitanya Kumar Borah Date: Mon, 2 Feb 2026 15:12:00 +0530 Subject: [PATCH 064/158] drm/vkms: Remove drm_colorop_pipeline_destroy() from vkms_destroy() Now that colorops are cleaned from drm_mode_config_cleanup(), remove drm_colorop_pipeline_destroy() from vkms_destroy(). 
Signed-off-by: Chaitanya Kumar Borah Reviewed-by: Uma Shankar Reviewed-by: Louis Chauvet Acked-by: Jani Nikula Signed-off-by: Suraj Kandpal Link: https://patch.msgid.link/20260202094202.2871478-8-chaitanya.kumar.borah@intel.com --- drivers/gpu/drm/vkms/vkms_drv.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 434c295f44ba..95020765c4c2 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -259,7 +259,6 @@ void vkms_destroy(struct vkms_config *config) fdev = config->dev->faux_dev; - drm_colorop_pipeline_destroy(&config->dev->drm); drm_dev_unregister(&config->dev->drm); drm_atomic_helper_shutdown(&config->dev->drm); devres_release_group(&fdev->dev, NULL); From 3c2d28f4a67af7ada8f3332270b0d349967e6aa1 Mon Sep 17 00:00:00 2001 From: Chaitanya Kumar Borah Date: Mon, 2 Feb 2026 15:12:01 +0530 Subject: [PATCH 065/158] drm/colorop: Use destroy callback for color pipeline teardown Switch drm_colorop_pipeline_destroy() to use the driver-provided destroy callback instead of directly calling drm_colorop_cleanup() and freeing the object. This allows drivers that embed struct drm_colorop in driver-specific objects to perform correct teardown. 
Signed-off-by: Chaitanya Kumar Borah Reviewed-by: Suraj Kandpal Reviewed-by: Uma Shankar Reviewed-by: Alex Hung Acked-by: Jani Nikula Signed-off-by: Suraj Kandpal Link: https://patch.msgid.link/20260202094202.2871478-9-chaitanya.kumar.borah@intel.com --- drivers/gpu/drm/drm_colorop.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_colorop.c b/drivers/gpu/drm/drm_colorop.c index 2bce29176ab3..aa19de769eb2 100644 --- a/drivers/gpu/drm/drm_colorop.c +++ b/drivers/gpu/drm/drm_colorop.c @@ -208,8 +208,7 @@ void drm_colorop_pipeline_destroy(struct drm_device *dev) struct drm_colorop *colorop, *next; list_for_each_entry_safe(colorop, next, &config->colorop_list, head) { - drm_colorop_cleanup(colorop); - kfree(colorop); + colorop->funcs->destroy(colorop); } } EXPORT_SYMBOL(drm_colorop_pipeline_destroy); From 55c19e27c56d7843afd08634114d510f67f75370 Mon Sep 17 00:00:00 2001 From: Chaitanya Kumar Borah Date: Mon, 2 Feb 2026 15:12:02 +0530 Subject: [PATCH 066/158] drm/i915/color: Add failure handling in plane color pipeline init The plane color pipeline initialization built up multiple colorop blocks inline, but did not reliably clean up partially constructed pipelines when an intermediate step failed. This could lead to leaked colorop objects and fragile error handling as the pipeline grows. Refactor the pipeline construction to use a common helper for adding colorop blocks. This centralizes allocation, initialization, and teardown logic, allowing the caller to reliably unwind all previously created colorops on failure. 
v2: - Refactor code to avoid repetition (Suraj) v3: - s/nvl/xe3plpd (Suraj) Signed-off-by: Chaitanya Kumar Borah Reviewed-by: Suraj Kandpal Acked-by: Jani Nikula Signed-off-by: Suraj Kandpal Link: https://patch.msgid.link/20260202094202.2871478-10-chaitanya.kumar.borah@intel.com --- .../drm/i915/display/intel_color_pipeline.c | 172 ++++++++++++------ 1 file changed, 121 insertions(+), 51 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_color_pipeline.c b/drivers/gpu/drm/i915/display/intel_color_pipeline.c index 8fecc53540ba..6cf8080ee800 100644 --- a/drivers/gpu/drm/i915/display/intel_color_pipeline.c +++ b/drivers/gpu/drm/i915/display/intel_color_pipeline.c @@ -2,6 +2,8 @@ /* * Copyright © 2025 Intel Corporation */ +#include + #include "intel_color.h" #include "intel_colorop.h" #include "intel_color_pipeline.h" @@ -10,6 +12,7 @@ #include "skl_universal_plane.h" #define MAX_COLOR_PIPELINES 1 +#define MAX_COLOROP 4 #define PLANE_DEGAMMA_SIZE 128 #define PLANE_GAMMA_SIZE 32 @@ -17,70 +20,137 @@ static const struct drm_colorop_funcs intel_colorop_funcs = { .destroy = intel_colorop_destroy, }; +/* + * 3DLUT can be bound to all three HDR planes. However, even with the latest + * color pipeline UAPI, there is no good way to represent a HW block which + * can be shared/attached at different stages of the pipeline. So right now, + * we expose 3DLUT only attached with the primary plane. + * + * That way we don't confuse the userspace with opaque commit failures + * on trying to enable it on multiple planes which would otherwise make + * the pipeline totally unusable. 
+ */ +static const enum intel_color_block xe3plpd_primary_plane_pipeline[] = { + INTEL_PLANE_CB_PRE_CSC_LUT, + INTEL_PLANE_CB_CSC, + INTEL_PLANE_CB_3DLUT, + INTEL_PLANE_CB_POST_CSC_LUT, +}; + +static const enum intel_color_block hdr_plane_pipeline[] = { + INTEL_PLANE_CB_PRE_CSC_LUT, + INTEL_PLANE_CB_CSC, + INTEL_PLANE_CB_POST_CSC_LUT, +}; + +static bool plane_has_3dlut(struct intel_display *display, enum pipe pipe, + struct drm_plane *plane) +{ + return (DISPLAY_VER(display) >= 35 && + intel_color_crtc_has_3dlut(display, pipe) && + plane->type == DRM_PLANE_TYPE_PRIMARY); +} + +static +struct intel_colorop *intel_color_pipeline_plane_add_colorop(struct drm_plane *plane, + struct intel_colorop *prev, + enum intel_color_block id) +{ + struct drm_device *dev = plane->dev; + struct intel_colorop *colorop; + int ret; + + colorop = intel_colorop_create(id); + + if (IS_ERR(colorop)) + return colorop; + + switch (id) { + case INTEL_PLANE_CB_PRE_CSC_LUT: + ret = drm_plane_colorop_curve_1d_lut_init(dev, + &colorop->base, plane, + &intel_colorop_funcs, + PLANE_DEGAMMA_SIZE, + DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + break; + case INTEL_PLANE_CB_CSC: + ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane, + &intel_colorop_funcs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + break; + case INTEL_PLANE_CB_3DLUT: + ret = drm_plane_colorop_3dlut_init(dev, &colorop->base, plane, + &intel_colorop_funcs, 17, + DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, + true); + break; + case INTEL_PLANE_CB_POST_CSC_LUT: + ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane, + &intel_colorop_funcs, + PLANE_GAMMA_SIZE, + DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + break; + default: + drm_err(plane->dev, "Invalid colorop id [%d]", id); + ret = -EINVAL; + } + + if (ret) + goto cleanup; + + if (prev) + drm_colorop_set_next_property(&prev->base, &colorop->base); + + return colorop; + +cleanup: + 
intel_colorop_destroy(&colorop->base); + return ERR_PTR(ret); +} + static int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_enum_list *list, enum pipe pipe) { struct drm_device *dev = plane->dev; struct intel_display *display = to_intel_display(dev); - struct drm_colorop *prev_op; - struct intel_colorop *colorop; - int ret; + struct intel_colorop *colorop[MAX_COLOROP]; + struct intel_colorop *prev = NULL; + const enum intel_color_block *pipeline; + int pipeline_len; + int ret = 0; + int i; - colorop = intel_colorop_create(INTEL_PLANE_CB_PRE_CSC_LUT); - - ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane, &intel_colorop_funcs, - PLANE_DEGAMMA_SIZE, - DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, - DRM_COLOROP_FLAG_ALLOW_BYPASS); - - if (ret) - return ret; - - list->type = colorop->base.base.id; - - /* TODO: handle failures and clean up */ - prev_op = &colorop->base; - - colorop = intel_colorop_create(INTEL_PLANE_CB_CSC); - ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane, &intel_colorop_funcs, - DRM_COLOROP_FLAG_ALLOW_BYPASS); - if (ret) - return ret; - - drm_colorop_set_next_property(prev_op, &colorop->base); - prev_op = &colorop->base; - - if (DISPLAY_VER(display) >= 35 && - intel_color_crtc_has_3dlut(display, pipe) && - plane->type == DRM_PLANE_TYPE_PRIMARY) { - colorop = intel_colorop_create(INTEL_PLANE_CB_3DLUT); - - ret = drm_plane_colorop_3dlut_init(dev, &colorop->base, plane, - &intel_colorop_funcs, 17, - DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, - true); - if (ret) - return ret; - - drm_colorop_set_next_property(prev_op, &colorop->base); - - prev_op = &colorop->base; + if (plane_has_3dlut(display, pipe, plane)) { + pipeline = xe3plpd_primary_plane_pipeline; + pipeline_len = ARRAY_SIZE(xe3plpd_primary_plane_pipeline); + } else { + pipeline = hdr_plane_pipeline; + pipeline_len = ARRAY_SIZE(hdr_plane_pipeline); } - colorop = intel_colorop_create(INTEL_PLANE_CB_POST_CSC_LUT); - ret = 
drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane, &intel_colorop_funcs, - PLANE_GAMMA_SIZE, - DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, - DRM_COLOROP_FLAG_ALLOW_BYPASS); - if (ret) - return ret; + for (i = 0; i < pipeline_len; i++) { + colorop[i] = intel_color_pipeline_plane_add_colorop(plane, prev, + pipeline[i]); + if (IS_ERR(colorop[i])) { + ret = PTR_ERR(colorop[i]); + goto cleanup; + } - drm_colorop_set_next_property(prev_op, &colorop->base); + prev = colorop[i]; + } - list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", list->type); + list->type = colorop[0]->base.base.id; + list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", colorop[0]->base.base.id); return 0; + +cleanup: + while (--i >= 0) + intel_colorop_destroy(&colorop[i]->base); + return ret; } int intel_color_pipeline_plane_init(struct drm_plane *plane, enum pipe pipe) From 95ffa10056b33bf5a90090b02da2edd52e1e281c Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 28 Jan 2026 13:43:45 +0100 Subject: [PATCH 067/158] drm/atomic: Make drm_atomic_private_obj_init fallible MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since we're going to move the drm_private_obj state allocation to a callback, we need to be able to deal with its possible failure. Make drm_private_obj_init return an error code on failure. 
Suggested-by: Ville Syrjälä Reviewed-by: Thomas Zimmermann Link: https://patch.msgid.link/20260128-drm-private-obj-reset-v4-1-90891fa3d3b0@redhat.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/drm_atomic.c | 14 +++++++++----- include/drm/drm_atomic.h | 8 ++++---- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 52738b80ddbe..4191a8333fc4 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -927,12 +927,14 @@ static void drm_atomic_plane_print_state(struct drm_printer *p, * * Initialize the private object, which can be embedded into any * driver private object that needs its own atomic state. + * + * RETURNS: + * Zero on success, error code on failure */ -void -drm_atomic_private_obj_init(struct drm_device *dev, - struct drm_private_obj *obj, - struct drm_private_state *state, - const struct drm_private_state_funcs *funcs) +int drm_atomic_private_obj_init(struct drm_device *dev, + struct drm_private_obj *obj, + struct drm_private_state *state, + const struct drm_private_state_funcs *funcs) { memset(obj, 0, sizeof(*obj)); @@ -944,6 +946,8 @@ drm_atomic_private_obj_init(struct drm_device *dev, list_add_tail(&obj->head, &dev->mode_config.privobj_list); state->obj = obj; + + return 0; } EXPORT_SYMBOL(drm_atomic_private_obj_init); diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 178f8f62c80f..712f5fb977bf 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -723,10 +723,10 @@ struct drm_connector_state * __must_check drm_atomic_get_connector_state(struct drm_atomic_state *state, struct drm_connector *connector); -void drm_atomic_private_obj_init(struct drm_device *dev, - struct drm_private_obj *obj, - struct drm_private_state *state, - const struct drm_private_state_funcs *funcs); +int drm_atomic_private_obj_init(struct drm_device *dev, + struct drm_private_obj *obj, + struct drm_private_state *state, + const struct 
drm_private_state_funcs *funcs); void drm_atomic_private_obj_fini(struct drm_private_obj *obj); struct drm_private_state * __must_check From 47b5ac7daa46e2bc8e4916d856fdc036ac145bb6 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 28 Jan 2026 13:43:46 +0100 Subject: [PATCH 068/158] drm/atomic: Add new atomic_create_state callback to drm_private_obj The drm_private_obj initialization was inconsistent with the rest of the KMS objects. Indeed, it required passing a preallocated state in drm_private_obj_init(), while all the other objects would have a reset callback that would be called later on to create the state. However, reset really is meant to reset the hardware and software state. That it creates an initial state is a side-effect that has been used in all objects but drm_private_obj. This is made more complex since some drm_private_obj, the DisplayPort ones in particular, need to be persistent across a suspend/resume cycle, and such a cycle would call drm_mode_config_reset(). Thus, we need to add a new callback to allocate a pristine state for a given private object. This discussion has also come up during the atomic state readout discussion, so it might be introduced into the other objects later on. Until all drivers are converted to that new allocation pattern, we will only call it if the passed state is NULL. This will be removed eventually. 
Reviewed-by: Dmitry Baryshkov Reviewed-by: Thomas Zimmermann Link: https://patch.msgid.link/20260128-drm-private-obj-reset-v4-2-90891fa3d3b0@redhat.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/drm_atomic.c | 18 ++++++++++++++++-- include/drm/drm_atomic.h | 13 +++++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 4191a8333fc4..e3029c8f02e5 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -941,11 +941,25 @@ int drm_atomic_private_obj_init(struct drm_device *dev, drm_modeset_lock_init(&obj->lock); obj->dev = dev; - obj->state = state; obj->funcs = funcs; list_add_tail(&obj->head, &dev->mode_config.privobj_list); - state->obj = obj; + /* + * Not all users of drm_atomic_private_obj_init have been + * converted to using &drm_private_obj_funcs.atomic_create_state yet. + * For the time being, let's only call reset if the passed state is + * NULL. Otherwise, we will fallback to the previous behaviour. + */ + if (!state) { + state = obj->funcs->atomic_create_state(obj); + if (IS_ERR(state)) + return PTR_ERR(state); + + obj->state = state; + } else { + obj->state = state; + state->obj = obj; + } return 0; } diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 712f5fb977bf..0b1b32bcd2bd 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -261,6 +261,19 @@ struct drm_private_state; * drm_atomic_get_private_obj_state(). */ struct drm_private_state_funcs { + /** + * @atomic_create_state: + * + * Allocates a pristine, initialized, state for the private + * object and returns it. + * + * RETURNS: + * + * A new, pristine, private state instance or an error pointer + * on failure. 
+ */ + struct drm_private_state *(*atomic_create_state)(struct drm_private_obj *obj); + /** * @atomic_duplicate_state: * From e7be39ed171662474d6d5c9a83d790ef7d244bcd Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 28 Jan 2026 13:43:47 +0100 Subject: [PATCH 069/158] drm/atomic-helper: Add private_obj atomic_create_state helper Now that we have an atomic_create_state callback for drm_private_objs, we can provide a helper for it. It's somewhat different from the other similar helpers though, because we definitely expect drm_private_obj to be subclassed. It wouldn't make sense for a driver to use it as-is. So we can't provide a straight implementation of the atomic_create_state callback, but rather we provide the parts that will deal with the drm_private_obj initialization, and we will leave the allocation and initialization of the subclass to drivers. Reviewed-by: Dmitry Baryshkov Reviewed-by: Thomas Zimmermann Link: https://patch.msgid.link/20260128-drm-private-obj-reset-v4-3-90891fa3d3b0@redhat.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/drm_atomic_state_helper.c | 22 ++++++++++++++++++++++ include/drm/drm_atomic_state_helper.h | 3 +++ 2 files changed, 25 insertions(+) diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index cee6d8fc44ad..d21f32f0ad51 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -714,6 +714,28 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, } EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state); +/** + * __drm_atomic_helper_private_obj_create_state - initializes private object state + * @obj: private object + * @state: new state to initialize + * + * Initializes the newly allocated @state, usually required when + * initializing the drivers. + * + * @obj is assumed to be zeroed. + * + * This is useful for drivers that use private states. 
+ */ +void __drm_atomic_helper_private_obj_create_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + if (state) + state->obj = obj; + + obj->state = state; +} +EXPORT_SYMBOL(__drm_atomic_helper_private_obj_create_state); + /** * __drm_atomic_helper_private_obj_duplicate_state - copy atomic private state * @obj: CRTC object diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h index b9740edb2658..900672c6ea90 100644 --- a/include/drm/drm_atomic_state_helper.h +++ b/include/drm/drm_atomic_state_helper.h @@ -84,6 +84,9 @@ void __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state); void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, struct drm_connector_state *state); + +void __drm_atomic_helper_private_obj_create_state(struct drm_private_obj *obj, + struct drm_private_state *state); void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj, struct drm_private_state *state); From ca8453ce037e4485c67e9ad5207744158be7f0ca Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 28 Jan 2026 13:43:48 +0100 Subject: [PATCH 070/158] drm/bridge: Switch private_obj initialization to atomic_create_state The bridge implementation relies on a drm_private_obj, that is initialized by allocating and initializing a state, and then passing it to drm_private_obj_init. Since we're gradually moving away from that pattern to the more established one relying on a atomic_create_state implementation, let's migrate this instance to the new pattern. 
Reviewed-by: Dmitry Baryshkov Reviewed-by: Thomas Zimmermann Link: https://patch.msgid.link/20260128-drm-private-obj-reset-v4-4-90891fa3d3b0@redhat.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/drm_atomic_state_helper.c | 1 + drivers/gpu/drm/drm_bridge.c | 31 ++++++++++++----------- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index d21f32f0ad51..716631e8a10a 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -825,6 +825,7 @@ void __drm_atomic_helper_bridge_reset(struct drm_bridge *bridge, struct drm_bridge_state *state) { memset(state, 0, sizeof(*state)); + __drm_atomic_helper_private_obj_create_state(&bridge->base, &state->base); state->bridge = bridge; } EXPORT_SYMBOL(__drm_atomic_helper_bridge_reset); diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 3b165a0d1e77..94864e05619d 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -460,7 +460,21 @@ drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj, bridge->funcs->atomic_destroy_state(bridge, state); } +static struct drm_private_state * +drm_bridge_atomic_create_priv_state(struct drm_private_obj *obj) +{ + struct drm_bridge *bridge = drm_priv_to_bridge(obj); + struct drm_bridge_state *state; + + state = bridge->funcs->atomic_reset(bridge); + if (IS_ERR(state)) + return ERR_CAST(state); + + return &state->base; +} + static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = { + .atomic_create_state = drm_bridge_atomic_create_priv_state, .atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state, .atomic_destroy_state = drm_bridge_atomic_destroy_priv_state, }; @@ -537,26 +551,13 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, goto err_reset_bridge; } - if (drm_bridge_is_atomic(bridge)) { - struct drm_bridge_state *state; - - 
state = bridge->funcs->atomic_reset(bridge); - if (IS_ERR(state)) { - ret = PTR_ERR(state); - goto err_detach_bridge; - } - + if (drm_bridge_is_atomic(bridge)) drm_atomic_private_obj_init(bridge->dev, &bridge->base, - &state->base, + NULL, &drm_bridge_priv_state_funcs); - } return 0; -err_detach_bridge: - if (bridge->funcs->detach) - bridge->funcs->detach(bridge); - err_reset_bridge: bridge->dev = NULL; bridge->encoder = NULL; From 2dce31863b2867a54a0e7b97657ee09a7ea8ad21 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 28 Jan 2026 13:43:49 +0100 Subject: [PATCH 071/158] drm/dp_mst: Switch private_obj initialization to atomic_create_state The DP MST implementation relies on a drm_private_obj, that is initialized by allocating and initializing a state, and then passing it to drm_private_obj_init. Since we're gradually moving away from that pattern to the more established one relying on a atomic_create_state implementation, let's migrate this instance to the new pattern. Reviewed-by: Dmitry Baryshkov Link: https://patch.msgid.link/20260128-drm-private-obj-reset-v4-5-90891fa3d3b0@redhat.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/display/drm_dp_mst_topology.c | 37 ++++++++++++------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index be749dcad3b5..1ab0233a2a18 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -5184,6 +5184,28 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj, kfree(mst_state); } +static struct drm_private_state * +drm_dp_mst_atomic_create_state(struct drm_private_obj *obj) +{ + struct drm_dp_mst_topology_mgr *mgr = + to_dp_mst_topology_mgr(obj); + struct drm_dp_mst_topology_state *mst_state; + + mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL); + if (!mst_state) + return ERR_PTR(-ENOMEM); + + __drm_atomic_helper_private_obj_create_state(obj, 
&mst_state->base); + + mst_state->total_avail_slots = 63; + mst_state->start_slot = 1; + + mst_state->mgr = mgr; + INIT_LIST_HEAD(&mst_state->payloads); + + return &mst_state->base; +} + static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port, struct drm_dp_mst_branch *branch) { @@ -5620,6 +5642,7 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state) EXPORT_SYMBOL(drm_dp_mst_atomic_check); const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = { + .atomic_create_state = drm_dp_mst_atomic_create_state, .atomic_duplicate_state = drm_dp_mst_duplicate_state, .atomic_destroy_state = drm_dp_mst_destroy_state, }; @@ -5708,8 +5731,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id) { - struct drm_dp_mst_topology_state *mst_state; - mutex_init(&mgr->lock); mutex_init(&mgr->qlock); mutex_init(&mgr->delayed_destroy_lock); @@ -5743,18 +5764,8 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, mgr->max_payloads = max_payloads; mgr->conn_base_id = conn_base_id; - mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL); - if (mst_state == NULL) - return -ENOMEM; - - mst_state->total_avail_slots = 63; - mst_state->start_slot = 1; - - mst_state->mgr = mgr; - INIT_LIST_HEAD(&mst_state->payloads); - drm_atomic_private_obj_init(dev, &mgr->base, - &mst_state->base, + NULL, &drm_dp_mst_topology_state_funcs); return 0; From 8e6da25bd60dcce21717b168851b76cae5497179 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 28 Jan 2026 13:43:50 +0100 Subject: [PATCH 072/158] drm/dp_tunnel: Switch private_obj initialization to atomic_create_state The DP tunnel implementation relies on a drm_private_obj, that is initialized by allocating and initializing a state, and then passing it to drm_private_obj_init. 
Since we're gradually moving away from that pattern to the more established one relying on a atomic_create_state implementation, let's migrate this instance to the new pattern. Reviewed-by: Dmitry Baryshkov Link: https://patch.msgid.link/20260128-drm-private-obj-reset-v4-6-90891fa3d3b0@redhat.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/display/drm_dp_tunnel.c | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c index 43f13a7c79b9..536b5aaae776 100644 --- a/drivers/gpu/drm/display/drm_dp_tunnel.c +++ b/drivers/gpu/drm/display/drm_dp_tunnel.c @@ -1497,7 +1497,22 @@ static void tunnel_group_destroy_state(struct drm_private_obj *obj, struct drm_p free_group_state(to_group_state(state)); } +static struct drm_private_state *tunnel_group_atomic_create_state(struct drm_private_obj *obj) +{ + struct drm_dp_tunnel_group_state *group_state; + + group_state = kzalloc(sizeof(*group_state), GFP_KERNEL); + if (!group_state) + return ERR_PTR(-ENOMEM); + + __drm_atomic_helper_private_obj_create_state(obj, &group_state->base); + INIT_LIST_HEAD(&group_state->tunnel_states); + + return &group_state->base; +} + static const struct drm_private_state_funcs tunnel_group_funcs = { + .atomic_create_state = tunnel_group_atomic_create_state, .atomic_duplicate_state = tunnel_group_duplicate_state, .atomic_destroy_state = tunnel_group_destroy_state, }; @@ -1581,19 +1596,11 @@ EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_new_state); static bool init_group(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_tunnel_group *group) { - struct drm_dp_tunnel_group_state *group_state; - - group_state = kzalloc(sizeof(*group_state), GFP_KERNEL); - if (!group_state) - return false; - - INIT_LIST_HEAD(&group_state->tunnel_states); - group->mgr = mgr; group->available_bw = -1; INIT_LIST_HEAD(&group->tunnels); - drm_atomic_private_obj_init(mgr->dev, &group->base, &group_state->base, + 
drm_atomic_private_obj_init(mgr->dev, &group->base, NULL, &tunnel_group_funcs); return true; From d2bbd8a4273e2f274a568f501ab93184a33328fd Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 28 Jan 2026 13:43:52 +0100 Subject: [PATCH 073/158] drm/arm: komeda: Switch private_obj initialization to atomic_create_state The ARM komeda driver relies on a number of drm_private_objs, that are initialized by allocating and initializing a state, and then passing it to drm_private_obj_init. Since we're gradually moving away from that pattern to the more established one relying on a atomic_create_state implementation, let's migrate this instance to the new pattern. Acked-by: Liviu Dudau Acked-by: Dmitry Baryshkov Link: https://patch.msgid.link/20260128-drm-private-obj-reset-v4-8-90891fa3d3b0@redhat.com Signed-off-by: Maxime Ripard --- .../drm/arm/display/komeda/komeda_pipeline.h | 2 + .../arm/display/komeda/komeda_private_obj.c | 208 ++++++++++++------ 2 files changed, 146 insertions(+), 64 deletions(-) diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h index ac8725e24853..37b9e9220244 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h @@ -128,6 +128,8 @@ struct komeda_component { const struct komeda_component_funcs *funcs; }; +#define to_component(o) container_of(o, struct komeda_component, obj) + /** * struct komeda_component_output * diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c b/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c index 914400c4af73..4994b69c6595 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c @@ -40,7 +40,24 @@ komeda_layer_atomic_destroy_state(struct drm_private_obj *obj, kfree(st); } +static struct drm_private_state * +komeda_layer_atomic_create_state(struct drm_private_obj *obj) +{ + struct 
komeda_layer_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return ERR_PTR(-ENOMEM); + + __drm_atomic_helper_private_obj_create_state(obj, &st->base.obj); + komeda_component_state_reset(&st->base); + st->base.component = to_component(obj); + + return &st->base.obj; +} + static const struct drm_private_state_funcs komeda_layer_obj_funcs = { + .atomic_create_state = komeda_layer_atomic_create_state, .atomic_duplicate_state = komeda_layer_atomic_duplicate_state, .atomic_destroy_state = komeda_layer_atomic_destroy_state, }; @@ -48,14 +65,7 @@ static const struct drm_private_state_funcs komeda_layer_obj_funcs = { static int komeda_layer_obj_add(struct komeda_kms_dev *kms, struct komeda_layer *layer) { - struct komeda_layer_state *st; - - st = kzalloc(sizeof(*st), GFP_KERNEL); - if (!st) - return -ENOMEM; - - st->base.component = &layer->base; - drm_atomic_private_obj_init(&kms->base, &layer->base.obj, &st->base.obj, + drm_atomic_private_obj_init(&kms->base, &layer->base.obj, NULL, &komeda_layer_obj_funcs); return 0; } @@ -82,7 +92,24 @@ komeda_scaler_atomic_destroy_state(struct drm_private_obj *obj, kfree(to_scaler_st(priv_to_comp_st(state))); } +static struct drm_private_state * +komeda_scaler_atomic_create_state(struct drm_private_obj *obj) +{ + struct komeda_scaler_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return ERR_PTR(-ENOMEM); + + __drm_atomic_helper_private_obj_create_state(obj, &st->base.obj); + komeda_component_state_reset(&st->base); + st->base.component = to_component(obj); + + return &st->base.obj; +} + static const struct drm_private_state_funcs komeda_scaler_obj_funcs = { + .atomic_create_state = komeda_scaler_atomic_create_state, .atomic_duplicate_state = komeda_scaler_atomic_duplicate_state, .atomic_destroy_state = komeda_scaler_atomic_destroy_state, }; @@ -90,15 +117,8 @@ static const struct drm_private_state_funcs komeda_scaler_obj_funcs = { static int komeda_scaler_obj_add(struct komeda_kms_dev *kms, 
struct komeda_scaler *scaler) { - struct komeda_scaler_state *st; - - st = kzalloc(sizeof(*st), GFP_KERNEL); - if (!st) - return -ENOMEM; - - st->base.component = &scaler->base; drm_atomic_private_obj_init(&kms->base, - &scaler->base.obj, &st->base.obj, + &scaler->base.obj, NULL, &komeda_scaler_obj_funcs); return 0; } @@ -125,7 +145,24 @@ komeda_compiz_atomic_destroy_state(struct drm_private_obj *obj, kfree(to_compiz_st(priv_to_comp_st(state))); } +static struct drm_private_state * +komeda_compiz_atomic_create_state(struct drm_private_obj *obj) +{ + struct komeda_compiz_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return ERR_PTR(-ENOMEM); + + __drm_atomic_helper_private_obj_create_state(obj, &st->base.obj); + komeda_component_state_reset(&st->base); + st->base.component = to_component(obj); + + return &st->base.obj; +} + static const struct drm_private_state_funcs komeda_compiz_obj_funcs = { + .atomic_create_state = komeda_compiz_atomic_create_state, .atomic_duplicate_state = komeda_compiz_atomic_duplicate_state, .atomic_destroy_state = komeda_compiz_atomic_destroy_state, }; @@ -133,14 +170,7 @@ static const struct drm_private_state_funcs komeda_compiz_obj_funcs = { static int komeda_compiz_obj_add(struct komeda_kms_dev *kms, struct komeda_compiz *compiz) { - struct komeda_compiz_state *st; - - st = kzalloc(sizeof(*st), GFP_KERNEL); - if (!st) - return -ENOMEM; - - st->base.component = &compiz->base; - drm_atomic_private_obj_init(&kms->base, &compiz->base.obj, &st->base.obj, + drm_atomic_private_obj_init(&kms->base, &compiz->base.obj, NULL, &komeda_compiz_obj_funcs); return 0; @@ -168,7 +198,24 @@ komeda_splitter_atomic_destroy_state(struct drm_private_obj *obj, kfree(to_splitter_st(priv_to_comp_st(state))); } +static struct drm_private_state * +komeda_splitter_atomic_create_state(struct drm_private_obj *obj) +{ + struct komeda_splitter_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return ERR_PTR(-ENOMEM); + + 
__drm_atomic_helper_private_obj_create_state(obj, &st->base.obj); + komeda_component_state_reset(&st->base); + st->base.component = to_component(obj); + + return &st->base.obj; +} + static const struct drm_private_state_funcs komeda_splitter_obj_funcs = { + .atomic_create_state = komeda_splitter_atomic_create_state, .atomic_duplicate_state = komeda_splitter_atomic_duplicate_state, .atomic_destroy_state = komeda_splitter_atomic_destroy_state, }; @@ -176,15 +223,8 @@ static const struct drm_private_state_funcs komeda_splitter_obj_funcs = { static int komeda_splitter_obj_add(struct komeda_kms_dev *kms, struct komeda_splitter *splitter) { - struct komeda_splitter_state *st; - - st = kzalloc(sizeof(*st), GFP_KERNEL); - if (!st) - return -ENOMEM; - - st->base.component = &splitter->base; drm_atomic_private_obj_init(&kms->base, - &splitter->base.obj, &st->base.obj, + &splitter->base.obj, NULL, &komeda_splitter_obj_funcs); return 0; @@ -211,7 +251,24 @@ static void komeda_merger_atomic_destroy_state(struct drm_private_obj *obj, kfree(to_merger_st(priv_to_comp_st(state))); } +static struct drm_private_state * +komeda_merger_atomic_create_state(struct drm_private_obj *obj) +{ + struct komeda_merger_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return ERR_PTR(-ENOMEM); + + __drm_atomic_helper_private_obj_create_state(obj, &st->base.obj); + komeda_component_state_reset(&st->base); + st->base.component = to_component(obj); + + return &st->base.obj; +} + static const struct drm_private_state_funcs komeda_merger_obj_funcs = { + .atomic_create_state = komeda_merger_atomic_create_state, .atomic_duplicate_state = komeda_merger_atomic_duplicate_state, .atomic_destroy_state = komeda_merger_atomic_destroy_state, }; @@ -219,15 +276,8 @@ static const struct drm_private_state_funcs komeda_merger_obj_funcs = { static int komeda_merger_obj_add(struct komeda_kms_dev *kms, struct komeda_merger *merger) { - struct komeda_merger_state *st; - - st = kzalloc(sizeof(*st), 
GFP_KERNEL); - if (!st) - return -ENOMEM; - - st->base.component = &merger->base; drm_atomic_private_obj_init(&kms->base, - &merger->base.obj, &st->base.obj, + &merger->base.obj, NULL, &komeda_merger_obj_funcs); return 0; @@ -255,7 +305,24 @@ komeda_improc_atomic_destroy_state(struct drm_private_obj *obj, kfree(to_improc_st(priv_to_comp_st(state))); } +static struct drm_private_state * +komeda_improc_atomic_create_state(struct drm_private_obj *obj) +{ + struct komeda_improc_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return ERR_PTR(-ENOMEM); + + __drm_atomic_helper_private_obj_create_state(obj, &st->base.obj); + komeda_component_state_reset(&st->base); + st->base.component = to_component(obj); + + return &st->base.obj; +} + static const struct drm_private_state_funcs komeda_improc_obj_funcs = { + .atomic_create_state = komeda_improc_atomic_create_state, .atomic_duplicate_state = komeda_improc_atomic_duplicate_state, .atomic_destroy_state = komeda_improc_atomic_destroy_state, }; @@ -263,14 +330,7 @@ static const struct drm_private_state_funcs komeda_improc_obj_funcs = { static int komeda_improc_obj_add(struct komeda_kms_dev *kms, struct komeda_improc *improc) { - struct komeda_improc_state *st; - - st = kzalloc(sizeof(*st), GFP_KERNEL); - if (!st) - return -ENOMEM; - - st->base.component = &improc->base; - drm_atomic_private_obj_init(&kms->base, &improc->base.obj, &st->base.obj, + drm_atomic_private_obj_init(&kms->base, &improc->base.obj, NULL, &komeda_improc_obj_funcs); return 0; @@ -298,7 +358,24 @@ komeda_timing_ctrlr_atomic_destroy_state(struct drm_private_obj *obj, kfree(to_ctrlr_st(priv_to_comp_st(state))); } +static struct drm_private_state * +komeda_timing_ctrlr_atomic_create_state(struct drm_private_obj *obj) +{ + struct komeda_timing_ctrlr_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return ERR_PTR(-ENOMEM); + + __drm_atomic_helper_private_obj_create_state(obj, &st->base.obj); + 
komeda_component_state_reset(&st->base); + st->base.component = to_component(obj); + + return &st->base.obj; +} + static const struct drm_private_state_funcs komeda_timing_ctrlr_obj_funcs = { + .atomic_create_state = komeda_timing_ctrlr_atomic_create_state, .atomic_duplicate_state = komeda_timing_ctrlr_atomic_duplicate_state, .atomic_destroy_state = komeda_timing_ctrlr_atomic_destroy_state, }; @@ -306,14 +383,7 @@ static const struct drm_private_state_funcs komeda_timing_ctrlr_obj_funcs = { static int komeda_timing_ctrlr_obj_add(struct komeda_kms_dev *kms, struct komeda_timing_ctrlr *ctrlr) { - struct komeda_compiz_state *st; - - st = kzalloc(sizeof(*st), GFP_KERNEL); - if (!st) - return -ENOMEM; - - st->base.component = &ctrlr->base; - drm_atomic_private_obj_init(&kms->base, &ctrlr->base.obj, &st->base.obj, + drm_atomic_private_obj_init(&kms->base, &ctrlr->base.obj, NULL, &komeda_timing_ctrlr_obj_funcs); return 0; @@ -342,7 +412,24 @@ komeda_pipeline_atomic_destroy_state(struct drm_private_obj *obj, kfree(priv_to_pipe_st(state)); } +static struct drm_private_state * +komeda_pipeline_atomic_create_state(struct drm_private_obj *obj) +{ + struct komeda_pipeline_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return ERR_PTR(-ENOMEM); + + __drm_atomic_helper_private_obj_create_state(obj, &st->obj); + st->active_comps = 0; + st->pipe = container_of(obj, struct komeda_pipeline, obj); + + return &st->obj; +} + static const struct drm_private_state_funcs komeda_pipeline_obj_funcs = { + .atomic_create_state = komeda_pipeline_atomic_create_state, .atomic_duplicate_state = komeda_pipeline_atomic_duplicate_state, .atomic_destroy_state = komeda_pipeline_atomic_destroy_state, }; @@ -350,14 +437,7 @@ static const struct drm_private_state_funcs komeda_pipeline_obj_funcs = { static int komeda_pipeline_obj_add(struct komeda_kms_dev *kms, struct komeda_pipeline *pipe) { - struct komeda_pipeline_state *st; - - st = kzalloc(sizeof(*st), GFP_KERNEL); - if (!st) - 
return -ENOMEM; - - st->pipe = pipe; - drm_atomic_private_obj_init(&kms->base, &pipe->obj, &st->obj, + drm_atomic_private_obj_init(&kms->base, &pipe->obj, NULL, &komeda_pipeline_obj_funcs); return 0; From 8e46b1ebf37048303ac6cdd8abf9e31f9b4772ef Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 28 Jan 2026 13:43:53 +0100 Subject: [PATCH 074/158] drm/ingenic: Switch private_obj initialization to atomic_create_state The ingenic driver relies on two drm_private_objs, that are initialized by allocating and initializing a state, and then passing it to drm_private_obj_init. Since we're gradually moving away from that pattern to the more established one relying on a atomic_create_state implementation, let's migrate this instance to the new pattern. Acked-by: Paul Cercueil Link: https://patch.msgid.link/20260128-drm-private-obj-reset-v4-9-90891fa3d3b0@redhat.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 28 ++++++++++++++--------- drivers/gpu/drm/ingenic/ingenic-ipu.c | 28 +++++++++++++---------- 2 files changed, 33 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index d3213fbf22be..862691991ed2 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -954,6 +954,20 @@ static void ingenic_drm_destroy_state(struct drm_private_obj *obj, kfree(priv_state); } +static struct drm_private_state * +ingenic_drm_create_state(struct drm_private_obj *obj) +{ + struct ingenic_drm_private_state *priv_state; + + priv_state = kzalloc(sizeof(*priv_state), GFP_KERNEL); + if (!priv_state) + return ERR_PTR(-ENOMEM); + + __drm_atomic_helper_private_obj_create_state(obj, &priv_state->base); + + return &priv_state->base; +} + DEFINE_DRM_GEM_DMA_FOPS(ingenic_drm_fops); static const struct drm_driver ingenic_drm_driver_data = { @@ -1034,6 +1048,7 @@ static struct drm_mode_config_helper_funcs ingenic_drm_mode_config_helpers = { }; 
static const struct drm_private_state_funcs ingenic_drm_private_state_funcs = { + .atomic_create_state = ingenic_drm_create_state, .atomic_duplicate_state = ingenic_drm_duplicate_state, .atomic_destroy_state = ingenic_drm_destroy_state, }; @@ -1087,7 +1102,6 @@ static void ingenic_drm_atomic_private_obj_fini(struct drm_device *drm, void *pr static int ingenic_drm_bind(struct device *dev, bool has_components) { struct platform_device *pdev = to_platform_device(dev); - struct ingenic_drm_private_state *private_state; const struct jz_soc_info *soc_info; struct ingenic_drm *priv; struct clk *parent_clk; @@ -1387,19 +1401,13 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) goto err_devclk_disable; } - private_state = kzalloc(sizeof(*private_state), GFP_KERNEL); - if (!private_state) { - ret = -ENOMEM; - goto err_clk_notifier_unregister; - } - - drm_atomic_private_obj_init(drm, &priv->private_obj, &private_state->base, + drm_atomic_private_obj_init(drm, &priv->private_obj, NULL, &ingenic_drm_private_state_funcs); ret = drmm_add_action_or_reset(drm, ingenic_drm_atomic_private_obj_fini, &priv->private_obj); if (ret) - goto err_private_state_free; + goto err_clk_notifier_unregister; ret = drm_dev_register(drm, 0); if (ret) { @@ -1411,8 +1419,6 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) return 0; -err_private_state_free: - kfree(private_state); err_clk_notifier_unregister: clk_notifier_unregister(parent_clk, &priv->clock_nb); err_devclk_disable: diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c index 32638a713241..253a1ce30997 100644 --- a/drivers/gpu/drm/ingenic/ingenic-ipu.c +++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c @@ -750,7 +750,22 @@ static void ingenic_ipu_destroy_state(struct drm_private_obj *obj, kfree(priv_state); } +static struct drm_private_state * +ingenic_ipu_create_state(struct drm_private_obj *obj) +{ + struct ingenic_ipu_private_state *priv_state; + + priv_state = 
kzalloc(sizeof(*priv_state), GFP_KERNEL); + if (!priv_state) + return ERR_PTR(-ENOMEM); + + __drm_atomic_helper_private_obj_create_state(obj, &priv_state->base); + + return &priv_state->base; +} + static const struct drm_private_state_funcs ingenic_ipu_private_state_funcs = { + .atomic_create_state = ingenic_ipu_create_state, .atomic_duplicate_state = ingenic_ipu_duplicate_state, .atomic_destroy_state = ingenic_ipu_destroy_state, }; @@ -793,7 +808,6 @@ static const struct regmap_config ingenic_ipu_regmap_config = { static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d) { struct platform_device *pdev = to_platform_device(dev); - struct ingenic_ipu_private_state *private_state; const struct soc_info *soc_info; struct drm_device *drm = d; struct drm_plane *plane; @@ -887,20 +901,10 @@ static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d) return err; } - private_state = kzalloc(sizeof(*private_state), GFP_KERNEL); - if (!private_state) { - err = -ENOMEM; - goto err_clk_unprepare; - } - - drm_atomic_private_obj_init(drm, &ipu->private_obj, &private_state->base, + drm_atomic_private_obj_init(drm, &ipu->private_obj, NULL, &ingenic_ipu_private_state_funcs); return 0; - -err_clk_unprepare: - clk_unprepare(ipu->clk); - return err; } static void ingenic_ipu_unbind(struct device *dev, From 4ada3ac5ffbc5279ab4bb16a7e038c027c0fa19a Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 28 Jan 2026 13:43:54 +0100 Subject: [PATCH 075/158] drm/msm: mdp5: Switch private_obj initialization to atomic_create_state The MSM mdp5 driver relies on a drm_private_obj, that is initialized by allocating and initializing a state, and then passing it to drm_private_obj_init. Since we're gradually moving away from that pattern to the more established one relying on a atomic_create_state implementation, let's migrate this instance to the new pattern. 
Reviewed-by: Dmitry Baryshkov Link: https://patch.msgid.link/20260128-drm-private-obj-reset-v4-10-90891fa3d3b0@redhat.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 41 +++++++++++++----------- 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 61edf6864092..1fc967159076 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -114,6 +114,24 @@ static void mdp5_global_destroy_state(struct drm_private_obj *obj, kfree(mdp5_state); } +static struct drm_private_state * +mdp5_global_create_state(struct drm_private_obj *obj) +{ + struct drm_device *dev = obj->dev; + struct msm_drm_private *priv = dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct mdp5_global_state *mdp5_state; + + mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL); + if (!mdp5_state) + return ERR_PTR(-ENOMEM); + + __drm_atomic_helper_private_obj_create_state(obj, &mdp5_state->base); + mdp5_state->mdp5_kms = mdp5_kms; + + return &mdp5_state->base; +} + static void mdp5_global_print_state(struct drm_printer *p, const struct drm_private_state *state) { @@ -124,27 +142,12 @@ static void mdp5_global_print_state(struct drm_printer *p, } static const struct drm_private_state_funcs mdp5_global_state_funcs = { + .atomic_create_state = mdp5_global_create_state, .atomic_duplicate_state = mdp5_global_duplicate_state, .atomic_destroy_state = mdp5_global_destroy_state, .atomic_print_state = mdp5_global_print_state, }; -static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms) -{ - struct mdp5_global_state *state; - - state = kzalloc(sizeof(*state), GFP_KERNEL); - if (!state) - return -ENOMEM; - - state->mdp5_kms = mdp5_kms; - - drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state, - &state->base, - &mdp5_global_state_funcs); - return 0; -} - static void mdp5_enable_commit(struct 
msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); @@ -713,9 +716,9 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) mdp5_kms->dev = dev; - ret = mdp5_global_obj_init(mdp5_kms); - if (ret) - goto fail; + drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state, + NULL, + &mdp5_global_state_funcs); /* we need to set a default rate before enabling. Set a safe * rate first, then figure out hw revision, and then set a From 9bf5b4dfe1ea7af0bc7846bec2c46da68a6f03e6 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 28 Jan 2026 13:43:55 +0100 Subject: [PATCH 076/158] drm/msm: dpu1: Switch private_obj initialization to atomic_create_state The MSM dpu1 driver relies on a drm_private_obj, that is initialized by allocating and initializing a state, and then passing it to drm_private_obj_init. Since we're gradually moving away from that pattern to the more established one relying on a atomic_create_state implementation, let's migrate this instance to the new pattern. 
Reviewed-by: Dmitry Baryshkov Link: https://patch.msgid.link/20260128-drm-private-obj-reset-v4-11-90891fa3d3b0@redhat.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 42 +++++++++++++------------ 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 0623f1dbed97..3ae0987cfffe 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -367,6 +367,24 @@ static void dpu_kms_global_destroy_state(struct drm_private_obj *obj, kfree(dpu_state); } +static struct drm_private_state * +dpu_kms_global_create_state(struct drm_private_obj *obj) +{ + struct drm_device *dev = obj->dev; + struct msm_drm_private *priv = dev->dev_private; + struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); + struct dpu_global_state *dpu_state; + + dpu_state = kzalloc(sizeof(*dpu_state), GFP_KERNEL); + if (!dpu_state) + return ERR_PTR(-ENOMEM); + + __drm_atomic_helper_private_obj_create_state(obj, &dpu_state->base); + dpu_state->rm = &dpu_kms->rm; + + return &dpu_state->base; +} + static void dpu_kms_global_print_state(struct drm_printer *p, const struct drm_private_state *state) { @@ -376,28 +394,12 @@ static void dpu_kms_global_print_state(struct drm_printer *p, } static const struct drm_private_state_funcs dpu_kms_global_state_funcs = { + .atomic_create_state = dpu_kms_global_create_state, .atomic_duplicate_state = dpu_kms_global_duplicate_state, .atomic_destroy_state = dpu_kms_global_destroy_state, .atomic_print_state = dpu_kms_global_print_state, }; -static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms) -{ - struct dpu_global_state *state; - - state = kzalloc(sizeof(*state), GFP_KERNEL); - if (!state) - return -ENOMEM; - - drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state, - &state->base, - &dpu_kms_global_state_funcs); - - state->rm = &dpu_kms->rm; - - return 0; -} - static void dpu_kms_global_obj_fini(struct 
dpu_kms *dpu_kms) { drm_atomic_private_obj_fini(&dpu_kms->global_state); @@ -1158,9 +1160,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms) dev->mode_config.cursor_width = 512; dev->mode_config.cursor_height = 512; - rc = dpu_kms_global_obj_init(dpu_kms); - if (rc) - return rc; + drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state, + NULL, + &dpu_kms_global_state_funcs); atomic_set(&dpu_kms->bandwidth_ref, 0); From 5491f668910ed0b282beb10f3f6b2e4d3988a1a8 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 28 Jan 2026 13:43:58 +0100 Subject: [PATCH 077/158] drm/vc4: Switch private_obj initialization to atomic_create_state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The vc4 driver relies on a drm_private_obj, that is initialized by allocating and initializing a state, and then passing it to drm_private_obj_init. Since we're gradually moving away from that pattern to the more established one relying on a atomic_create_state implementation, let's migrate this instance to the new pattern. 
Reviewed-by: Maíra Canal Link: https://patch.msgid.link/20260128-drm-private-obj-reset-v4-14-90891fa3d3b0@redhat.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/vc4/vc4_kms.c | 69 ++++++++++++++++++++++++----------- 1 file changed, 48 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index e563c1210937..e94e2d344e0f 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -85,7 +85,22 @@ static void vc4_ctm_destroy_state(struct drm_private_obj *obj, kfree(ctm_state); } +static struct drm_private_state * +vc4_ctm_create_state(struct drm_private_obj *obj) +{ + struct vc4_ctm_state *ctm_state; + + ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL); + if (!ctm_state) + return ERR_PTR(-ENOMEM); + + __drm_atomic_helper_private_obj_create_state(obj, &ctm_state->base); + + return &ctm_state->base; +} + static const struct drm_private_state_funcs vc4_ctm_state_funcs = { + .atomic_create_state = vc4_ctm_create_state, .atomic_duplicate_state = vc4_ctm_duplicate_state, .atomic_destroy_state = vc4_ctm_destroy_state, }; @@ -99,15 +114,9 @@ static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused) static int vc4_ctm_obj_init(struct vc4_dev *vc4) { - struct vc4_ctm_state *ctm_state; - drm_modeset_lock_init(&vc4->ctm_state_lock); - ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL); - if (!ctm_state) - return -ENOMEM; - - drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base, + drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, NULL, &vc4_ctm_state_funcs); return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL); @@ -718,7 +727,22 @@ static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj, kfree(load_state); } +static struct drm_private_state * +vc4_load_tracker_create_state(struct drm_private_obj *obj) +{ + struct vc4_load_tracker_state *load_state; + + load_state = kzalloc(sizeof(*load_state), GFP_KERNEL); + if (!load_state) + 
return ERR_PTR(-ENOMEM); + + __drm_atomic_helper_private_obj_create_state(obj, &load_state->base); + + return &load_state->base; +} + static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = { + .atomic_create_state = vc4_load_tracker_create_state, .atomic_duplicate_state = vc4_load_tracker_duplicate_state, .atomic_destroy_state = vc4_load_tracker_destroy_state, }; @@ -732,14 +756,8 @@ static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused) static int vc4_load_tracker_obj_init(struct vc4_dev *vc4) { - struct vc4_load_tracker_state *load_state; - - load_state = kzalloc(sizeof(*load_state), GFP_KERNEL); - if (!load_state) - return -ENOMEM; - drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker, - &load_state->base, + NULL, &vc4_load_tracker_state_funcs); return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL); @@ -800,7 +818,22 @@ static void vc4_hvs_channels_print_state(struct drm_printer *p, } } +static struct drm_private_state * +vc4_hvs_channels_create_state(struct drm_private_obj *obj) +{ + struct vc4_hvs_state *hvs_state; + + hvs_state = kzalloc(sizeof(*hvs_state), GFP_KERNEL); + if (!hvs_state) + return ERR_PTR(-ENOMEM); + + __drm_atomic_helper_private_obj_create_state(obj, &hvs_state->base); + + return &hvs_state->base; +} + static const struct drm_private_state_funcs vc4_hvs_state_funcs = { + .atomic_create_state = vc4_hvs_channels_create_state, .atomic_duplicate_state = vc4_hvs_channels_duplicate_state, .atomic_destroy_state = vc4_hvs_channels_destroy_state, .atomic_print_state = vc4_hvs_channels_print_state, @@ -815,14 +848,8 @@ static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused) static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4) { - struct vc4_hvs_state *state; - - state = kzalloc(sizeof(*state), GFP_KERNEL); - if (!state) - return -ENOMEM; - drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels, - &state->base, + NULL, &vc4_hvs_state_funcs); return 
drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL); From c7384288d9266e52cd35aadb1749872caf3c0257 Mon Sep 17 00:00:00 2001 From: Matt Coster Date: Fri, 6 Feb 2026 16:02:12 +0000 Subject: [PATCH 078/158] drm/imagination: Improve handling of unknown FWCCB commands A couple small changes: - Validate the magic value at the head of FWCCB commands, and - Mask off the magic value before logging unknown command types to make them easier to interpret on sight. Reviewed-by: Frank Binns Link: https://patch.msgid.link/20260206-improve-bad-fwccb-cmd-v1-1-831a852ca127@imgtec.com Signed-off-by: Matt Coster --- drivers/gpu/drm/imagination/pvr_ccb.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/imagination/pvr_ccb.c b/drivers/gpu/drm/imagination/pvr_ccb.c index 9294b4ba1de7..2f4356a1e69f 100644 --- a/drivers/gpu/drm/imagination/pvr_ccb.c +++ b/drivers/gpu/drm/imagination/pvr_ccb.c @@ -136,6 +136,14 @@ pvr_ccb_slot_available_locked(struct pvr_ccb *pvr_ccb, u32 *write_offset) static void process_fwccb_command(struct pvr_device *pvr_dev, struct rogue_fwif_fwccb_cmd *cmd) { + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + + if ((cmd->cmd_type & ROGUE_CMD_MAGIC_DWORD_MASK) != ROGUE_CMD_MAGIC_DWORD_SHIFTED) { + drm_warn_once(drm_dev, "Received FWCCB command with bad magic value; ignoring (type=0x%08x)\n", + cmd->cmd_type); + return; + } + switch (cmd->cmd_type) { case ROGUE_FWIF_FWCCB_CMD_REQUEST_GPU_RESTART: pvr_power_reset(pvr_dev, false); @@ -151,8 +159,8 @@ process_fwccb_command(struct pvr_device *pvr_dev, struct rogue_fwif_fwccb_cmd *c break; default: - drm_info(from_pvr_device(pvr_dev), "Received unknown FWCCB command %x\n", - cmd->cmd_type); + drm_info(drm_dev, "Received unknown FWCCB command (type=%d)\n", + cmd->cmd_type & ~ROGUE_CMD_MAGIC_DWORD_MASK); break; } } From 4af267ce3441e10198daa52a8cc4b5cb4575d06f Mon Sep 17 00:00:00 2001 From: Matt Coster Date: Fri, 6 Feb 2026 16:02:13 +0000 Subject: [PATCH 
079/158] drm/imagination: Mark FWCCB_CMD_UPDATE_STATS as known Suppress the "unknown type" warning when processing a FWCCB command of type CMD_UPDATE_STATS which is known but (currently) unused. Reviewed-by: Frank Binns Link: https://patch.msgid.link/20260206-improve-bad-fwccb-cmd-v1-2-831a852ca127@imgtec.com Signed-off-by: Matt Coster --- drivers/gpu/drm/imagination/pvr_ccb.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/imagination/pvr_ccb.c b/drivers/gpu/drm/imagination/pvr_ccb.c index 2f4356a1e69f..9d4464583129 100644 --- a/drivers/gpu/drm/imagination/pvr_ccb.c +++ b/drivers/gpu/drm/imagination/pvr_ccb.c @@ -158,6 +158,14 @@ process_fwccb_command(struct pvr_device *pvr_dev, struct rogue_fwif_fwccb_cmd *c pvr_free_list_process_grow_req(pvr_dev, &cmd->cmd_data.cmd_free_list_gs); break; + case ROGUE_FWIF_FWCCB_CMD_UPDATE_STATS: + /* + * We currently have no infrastructure for processing these + * stats. It may be added in the future, but for now just + * suppress the "unknown" warning when receiving this command. + */ + break; + default: drm_info(drm_dev, "Received unknown FWCCB command (type=%d)\n", cmd->cmd_type & ~ROGUE_CMD_MAGIC_DWORD_MASK); From 857e2d886ad32d9a49fe99fd06bd78f4354043f4 Mon Sep 17 00:00:00 2001 From: Chintan Patel Date: Mon, 2 Feb 2026 20:46:04 -0800 Subject: [PATCH 080/158] drm/panel: jdi-lt070me05000: Use MIPI DSI multi functions Convert to the non-deprecated mipi_dsi_*_multi() helpers per the TODO list. This reduces boilerplate error checking while providing proper error accumulation. Use mipi_dsi_msleep() and mipi_dsi_usleep_range() macros for delays. Replace mdelay(10) and mdelay(20) with mipi_dsi_usleep_range() calls using tighter slop (10-11ms and 20-21ms respectively) since these functions aren't run often and don't need large timing windows. 
In jdi_panel_off(), reset the error context between display_off and enter_sleep_mode to preserve the original behavior of continuing power-down even if display_off fails. This ensures enter_sleep_mode executes before GPIO/regulator control, which is critical for proper power sequencing. Signed-off-by: Chintan Patel Reviewed-by: Douglas Anderson Signed-off-by: Douglas Anderson Link: https://patch.msgid.link/20260203044605.5890-1-chintanlike@gmail.com --- .../gpu/drm/panel/panel-jdi-lt070me05000.c | 105 ++++-------------- 1 file changed, 24 insertions(+), 81 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c index 3513e5c4dd8c..01bd748aecec 100644 --- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c +++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c @@ -48,34 +48,16 @@ static inline struct jdi_panel *to_jdi_panel(struct drm_panel *panel) static int jdi_panel_init(struct jdi_panel *jdi) { struct mipi_dsi_device *dsi = jdi->dsi; - struct device *dev = &jdi->dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dsi->mode_flags |= MIPI_DSI_MODE_LPM; - ret = mipi_dsi_dcs_soft_reset(dsi); - if (ret < 0) - return ret; + mipi_dsi_dcs_soft_reset_multi(&dsi_ctx); + mipi_dsi_usleep_range(&dsi_ctx, 10000, 20000); - usleep_range(10000, 20000); - - ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT << 4); - if (ret < 0) { - dev_err(dev, "failed to set pixel format: %d\n", ret); - return ret; - } - - ret = mipi_dsi_dcs_set_column_address(dsi, 0, jdi->mode->hdisplay - 1); - if (ret < 0) { - dev_err(dev, "failed to set column address: %d\n", ret); - return ret; - } - - ret = mipi_dsi_dcs_set_page_address(dsi, 0, jdi->mode->vdisplay - 1); - if (ret < 0) { - dev_err(dev, "failed to set page address: %d\n", ret); - return ret; - } + mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, MIPI_DCS_PIXEL_FMT_24BIT << 4); + mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0, 
jdi->mode->hdisplay - 1); + mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0, jdi->mode->vdisplay - 1); /* * BIT(5) BCTRL = 1 Backlight Control Block On, Brightness registers @@ -83,88 +65,49 @@ static int jdi_panel_init(struct jdi_panel *jdi) * BIT(3) BL = 1 Backlight Control On * BIT(2) DD = 0 Display Dimming is Off */ - ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, - (u8[]){ 0x24 }, 1); - if (ret < 0) { - dev_err(dev, "failed to write control display: %d\n", ret); - return ret; - } + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24); /* CABC off */ - ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_POWER_SAVE, - (u8[]){ 0x00 }, 1); - if (ret < 0) { - dev_err(dev, "failed to set cabc off: %d\n", ret); - return ret; - } + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00); - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "failed to set exit sleep mode: %d\n", ret); - return ret; - } + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 120); - msleep(120); - - ret = mipi_dsi_generic_write(dsi, (u8[]){0xB0, 0x00}, 2); - if (ret < 0) { - dev_err(dev, "failed to set mcap: %d\n", ret); - return ret; - } - - mdelay(10); + mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x00); + mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000); /* Interface setting, video mode */ - ret = mipi_dsi_generic_write(dsi, (u8[]) - {0xB3, 0x26, 0x08, 0x00, 0x20, 0x00}, 6); - if (ret < 0) { - dev_err(dev, "failed to set display interface setting: %d\n" - , ret); - return ret; - } + mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb3, 0x26, 0x08, 0x00, 0x20, 0x00); + mipi_dsi_usleep_range(&dsi_ctx, 20000, 21000); - mdelay(20); + mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x03); - ret = mipi_dsi_generic_write(dsi, (u8[]){0xB0, 0x03}, 2); - if (ret < 0) { - dev_err(dev, "failed to set default values for mcap: %d\n" - , ret); - return ret; - } - - return 0; + return dsi_ctx.accum_err; } static 
int jdi_panel_on(struct jdi_panel *jdi) { struct mipi_dsi_device *dsi = jdi->dsi; - struct device *dev = &jdi->dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dsi->mode_flags |= MIPI_DSI_MODE_LPM; - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) - dev_err(dev, "failed to set display on: %d\n", ret); + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); - return ret; + return dsi_ctx.accum_err; } static void jdi_panel_off(struct jdi_panel *jdi) { struct mipi_dsi_device *dsi = jdi->dsi; - struct device *dev = &jdi->dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) - dev_err(dev, "failed to set display off: %d\n", ret); - - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) - dev_err(dev, "failed to enter sleep mode: %d\n", ret); + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + /* Reset error to continue power-down even if display off failed */ + dsi_ctx.accum_err = 0; + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); msleep(100); } From e88bb45cf742177a18d63d306be345a294b4c1db Mon Sep 17 00:00:00 2001 From: "Kory Maincent (TI.com)" Date: Fri, 23 Jan 2026 17:12:19 +0100 Subject: [PATCH 081/158] dt-bindings: display: tilcdc: Convert to DT schema Convert the device tree binding documentation for tilcdc from plain text to DT binding schema. 
Reviewed-by: Krzysztof Kozlowski Signed-off-by: Kory Maincent (TI.com) Link: https://patch.msgid.link/20260123-feature_tilcdc-v5-1-5a44d2aa3f6f@bootlin.com Signed-off-by: Luca Ceresoli --- .../display/tilcdc/ti,am33xx-tilcdc.yaml | 100 ++++++++++++++++++ .../bindings/display/tilcdc/tilcdc.txt | 82 -------------- 2 files changed, 100 insertions(+), 82 deletions(-) create mode 100644 Documentation/devicetree/bindings/display/tilcdc/ti,am33xx-tilcdc.yaml delete mode 100644 Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt diff --git a/Documentation/devicetree/bindings/display/tilcdc/ti,am33xx-tilcdc.yaml b/Documentation/devicetree/bindings/display/tilcdc/ti,am33xx-tilcdc.yaml new file mode 100644 index 000000000000..eb0ebb678fa8 --- /dev/null +++ b/Documentation/devicetree/bindings/display/tilcdc/ti,am33xx-tilcdc.yaml @@ -0,0 +1,100 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +# Copyright 2025 Bootlin +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/tilcdc/ti,am33xx-tilcdc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: TI LCD Controller, found on AM335x, DA850, AM18x and OMAP-L138 + +maintainers: + - Kory Maincent + +properties: + compatible: + enum: + - ti,am33xx-tilcdc + - ti,da850-tilcdc + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + port: + $ref: /schemas/graph.yaml#/properties/port + + ti,hwmods: + $ref: /schemas/types.yaml#/definitions/string + description: + Name of the hwmod associated to the LCDC + + max-bandwidth: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + The maximum pixels per second that the memory interface / lcd + controller combination can sustain + # maximum: 2048*2048*60 + maximum: 251658240 + + max-width: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + The maximum horizontal pixel width supported by the lcd controller. 
+ maximum: 2048 + + max-pixelclock: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + The maximum pixel clock that can be supported by the lcd controller + in KHz. + + blue-and-red-wiring: + enum: [straight, crossed] + description: + This property deals with the LCDC revision 2 (found on AM335x) + color errata [1]. + - "straight" indicates normal wiring that supports RGB565, + BGR888, and XBGR8888 color formats. + - "crossed" indicates wiring that has blue and red wires + crossed. This setup supports BGR565, RGB888 and XRGB8888 + formats. + - If the property is not present or its value is not recognized + the legacy mode is assumed. This configuration supports RGB565, + RGB888 and XRGB8888 formats. However, depending on wiring, the red + and blue colors are swapped in either 16 or 24-bit color modes. + + [1] There is an errata about AM335x color wiring. For 16-bit color + mode the wires work as they should (LCD_DATA[0:4] is for Blue[3:7]), + but for 24 bit color modes the wiring of blue and red components is + crossed and LCD_DATA[0:4] is for Red[3:7] and LCD_DATA[11:15] is + for Blue[3-7]. 
For more details see section 3.1.1 in AM335x + Silicon Errata + https://www.ti.com/general/docs/lit/getliterature.tsp?baseLiteratureNumber=sprz360 + +required: + - compatible + - interrupts + - reg + - port + +additionalProperties: false + +examples: + - | + display-controller@4830e000 { + compatible = "ti,am33xx-tilcdc"; + reg = <0x4830e000 0x1000>; + interrupt-parent = <&intc>; + interrupts = <36>; + ti,hwmods = "lcdc"; + + blue-and-red-wiring = "crossed"; + + port { + endpoint { + remote-endpoint = <&hdmi_0>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt deleted file mode 100644 index 3b3d0bbfcfff..000000000000 --- a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt +++ /dev/null @@ -1,82 +0,0 @@ -Device-Tree bindings for tilcdc DRM driver - -Required properties: - - compatible: value should be one of the following: - - "ti,am33xx-tilcdc" for AM335x based boards - - "ti,da850-tilcdc" for DA850/AM18x/OMAP-L138 based boards - - interrupts: the interrupt number - - reg: base address and size of the LCDC device - -Recommended properties: - - ti,hwmods: Name of the hwmod associated to the LCDC - -Optional properties: - - max-bandwidth: The maximum pixels per second that the memory - interface / lcd controller combination can sustain - - max-width: The maximum horizontal pixel width supported by - the lcd controller. - - max-pixelclock: The maximum pixel clock that can be supported - by the lcd controller in KHz. - - blue-and-red-wiring: Recognized values "straight" or "crossed". - This property deals with the LCDC revision 2 (found on AM335x) - color errata [1]. - - "straight" indicates normal wiring that supports RGB565, - BGR888, and XBGR8888 color formats. - - "crossed" indicates wiring that has blue and red wires - crossed. This setup supports BGR565, RGB888 and XRGB8888 - formats. 
- - If the property is not present or its value is not recognized - the legacy mode is assumed. This configuration supports RGB565, - RGB888 and XRGB8888 formats. However, depending on wiring, the red - and blue colors are swapped in either 16 or 24-bit color modes. - -Optional nodes: - - - port/ports: to describe a connection to an external encoder. The - binding follows Documentation/devicetree/bindings/graph.txt and - supports a single port with a single endpoint. - - - See also Documentation/devicetree/bindings/display/tilcdc/panel.txt and - Documentation/devicetree/bindings/display/bridge/ti,tfp410.yaml for connecting - tfp410 DVI encoder or lcd panel to lcdc - -[1] There is an errata about AM335x color wiring. For 16-bit color mode - the wires work as they should (LCD_DATA[0:4] is for Blue[3:7]), - but for 24 bit color modes the wiring of blue and red components is - crossed and LCD_DATA[0:4] is for Red[3:7] and LCD_DATA[11:15] is - for Blue[3-7]. For more details see section 3.1.1 in AM335x - Silicon Errata: - https://www.ti.com/general/docs/lit/getliterature.tsp?baseLiteratureNumber=sprz360 - -Example: - - fb: fb@4830e000 { - compatible = "ti,am33xx-tilcdc", "ti,da850-tilcdc"; - reg = <0x4830e000 0x1000>; - interrupt-parent = <&intc>; - interrupts = <36>; - ti,hwmods = "lcdc"; - - blue-and-red-wiring = "crossed"; - - port { - lcdc_0: endpoint { - remote-endpoint = <&hdmi_0>; - }; - }; - }; - - tda19988: tda19988 { - compatible = "nxp,tda998x"; - reg = <0x70>; - - pinctrl-names = "default", "off"; - pinctrl-0 = <&nxp_hdmi_bonelt_pins>; - pinctrl-1 = <&nxp_hdmi_bonelt_off_pins>; - - port { - hdmi_0: endpoint { - remote-endpoint = <&lcdc_0>; - }; - }; - }; From bc3f1e36d12969060c9068b9c7c14407f869270e Mon Sep 17 00:00:00 2001 From: "Kory Maincent (TI.com)" Date: Fri, 23 Jan 2026 17:12:20 +0100 Subject: [PATCH 082/158] dt-bindings: display: tilcdc: Mark panel binding as deprecated Mark the ti,tilcdc,panel binding as deprecated in the documentation. 
This legacy binding should no longer be used for new designs. Users should migrate to the standard DRM panel bindings instead. Acked-by: Krzysztof Kozlowski Signed-off-by: Kory Maincent (TI.com) Link: https://patch.msgid.link/20260123-feature_tilcdc-v5-2-5a44d2aa3f6f@bootlin.com Signed-off-by: Luca Ceresoli --- Documentation/devicetree/bindings/display/tilcdc/panel.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/display/tilcdc/panel.txt b/Documentation/devicetree/bindings/display/tilcdc/panel.txt index 808216310ea2..b973174d704e 100644 --- a/Documentation/devicetree/bindings/display/tilcdc/panel.txt +++ b/Documentation/devicetree/bindings/display/tilcdc/panel.txt @@ -1,4 +1,5 @@ Device-Tree bindings for tilcdc DRM generic panel output driver +This binding is deprecated and should not be used. Required properties: - compatible: value should be "ti,tilcdc,panel". From 332146d21d1d4211520957458973f4316a6c2fa7 Mon Sep 17 00:00:00 2001 From: "Kory Maincent (TI.com)" Date: Fri, 23 Jan 2026 17:12:21 +0100 Subject: [PATCH 083/158] drm/tilcdc: Remove simulate_vesa_sync flag The tilcdc hardware does not generate VESA-compliant sync signals. It aligns the vertical sync (VS) on the second edge of the horizontal sync (HS) instead of the first edge. To compensate for this hardware behavior, the driver applies a timing adjustment in mode_fixup(). Previously, this adjustment was conditional based on the simulate_vesa_sync flag, which was only set when using external encoders. This appears problematic because: 1. The timing adjustment seems needed for the hardware behavior regardless of whether an external encoder is used 2. The external encoder infrastructure is driver-specific and being removed due to design issues 3. 
Boards using tilcdc without bridges (e.g., am335x-evm, am335x-evmsk) may not be getting the necessary timing adjustments Remove the simulate_vesa_sync flag and apply the VESA sync timing adjustment unconditionally, ensuring consistent behavior across all configurations. While it's unclear if the previous conditional behavior was causing actual issues, the unconditional adjustment better reflects the hardware's characteristics. Reviewed-by: Luca Ceresoli Signed-off-by: Kory Maincent (TI.com) Link: https://patch.msgid.link/20260123-feature_tilcdc-v5-3-5a44d2aa3f6f@bootlin.com Signed-off-by: Luca Ceresoli --- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 16 ---------------- drivers/gpu/drm/tilcdc/tilcdc_drv.h | 2 -- drivers/gpu/drm/tilcdc/tilcdc_external.c | 1 - 3 files changed, 19 deletions(-) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 52c95131af5a..b06b1453db2d 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -47,9 +47,6 @@ struct tilcdc_crtc { struct drm_framebuffer *next_fb; - /* Only set if an external encoder is connected */ - bool simulate_vesa_sync; - int sync_lost_count; bool frame_intact; struct work_struct recover_work; @@ -642,11 +639,6 @@ static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); - - if (!tilcdc_crtc->simulate_vesa_sync) - return true; - /* * tilcdc does not generate VESA-compliant sync but aligns * VS on the second edge of HS instead of first edge. 
@@ -866,14 +858,6 @@ void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc, tilcdc_crtc->info = info; } -void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc, - bool simulate_vesa_sync) -{ - struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); - - tilcdc_crtc->simulate_vesa_sync = simulate_vesa_sync; -} - void tilcdc_crtc_update_clk(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h index 58b276f82a66..3aba3a1155ba 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h @@ -160,8 +160,6 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc); void tilcdc_crtc_update_clk(struct drm_crtc *crtc); void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc, const struct tilcdc_panel_info *info); -void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc, - bool simulate_vesa_sync); void tilcdc_crtc_shutdown(struct drm_crtc *crtc); void tilcdc_crtc_destroy(struct drm_crtc *crtc); int tilcdc_crtc_update_fb(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c index 3b86d002ef62..da755a411d9f 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_external.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c @@ -80,7 +80,6 @@ int tilcdc_add_component_encoder(struct drm_device *ddev) return -ENODEV; /* Only tda998x is supported at the moment. */ - tilcdc_crtc_set_simulate_vesa_sync(priv->crtc, true); tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_tda998x); return 0; From ca062af436593d66ce2afa4cd00a4cb2fb775755 Mon Sep 17 00:00:00 2001 From: "Kory Maincent (TI.com)" Date: Fri, 23 Jan 2026 17:12:22 +0100 Subject: [PATCH 084/158] drm/tilcdc: Add support for DRM bus flags and simplify panel config Migrate CRTC mode configuration to use standard DRM bus flags in preparation for removing the tilcdc_panel driver and its custom tilcdc_panel_info structure. 
Add support for DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE and DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE flags to control pixel clock and sync signal edge polarity, while maintaining backward compatibility with the existing tilcdc panel info structure. Simplify several hardware parameters by setting them to fixed defaults based on common usage across existing device trees: - DMA burst size: 16 (previously configurable via switch statement) - AC bias frequency: 255 (previously panel-specific) - FIFO DMA request delay: 128 (previously panel-specific) These parameters show no variation in real-world usage, so hardcoding them simplifies the driver without losing functionality. Preserve FIFO threshold configurability by detecting the SoC type, as this parameter varies between AM33xx (8) and DA850 (16) platforms. Reviewed-by: Luca Ceresoli Signed-off-by: Kory Maincent (TI.com) Link: https://patch.msgid.link/20260123-feature_tilcdc-v5-4-5a44d2aa3f6f@bootlin.com Signed-off-by: Luca Ceresoli --- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 47 ++++++++++------------------ drivers/gpu/drm/tilcdc/tilcdc_drv.c | 17 ++++++++-- drivers/gpu/drm/tilcdc/tilcdc_drv.h | 2 ++ 3 files changed, 34 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index b06b1453db2d..2309a9a0c925 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -285,27 +285,15 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc) /* Configure the Burst Size and fifo threshold of DMA: */ reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770; - switch (info->dma_burst_sz) { - case 1: - reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_1); - break; - case 2: - reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_2); - break; - case 4: - reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_4); - break; - case 8: - reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_8); - break; - case 16: - reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16); - break; - default: - 
dev_err(dev->dev, "invalid burst size\n"); - return; + /* Use 16 bit DMA burst size by default */ + reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16); + if (priv->fifo_th) { + int fifo_th_val = ilog2(priv->fifo_th) - 3; + + reg |= (fifo_th_val << 8); + } else { + reg |= (info->fifo_th << 8); } - reg |= (info->fifo_th << 8); tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg); /* Configure timings: */ @@ -321,8 +309,8 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc) /* Set AC Bias Period and Number of Transitions per Interrupt: */ reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00; - reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) | - LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt); + /* Use 255 AC Bias Pin Frequency by default */ + reg |= LCDC_AC_BIAS_FREQUENCY(255); /* * subtract one from hfp, hbp, hsw because the hardware uses @@ -392,20 +380,19 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc) return; } } - reg |= info->fdd << 12; + /* Use 128 FIFO DMA Request Delay by default */ + reg |= 128 << 12; tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg); - if (info->invert_pxl_clk) + if (info->invert_pxl_clk || + mode->flags == DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK); else tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK); - if (info->sync_ctrl) - tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL); - else - tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL); - - if (info->sync_edge) + tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL); + if (info->sync_edge || + mode->flags == DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE) tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE); else tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 3dcbec312bac..fe01f3fcaf3c 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -31,6 
+31,11 @@ #include "tilcdc_panel.h" #include "tilcdc_regs.h" +enum tilcdc_variant { + AM33XX_TILCDC, + DA850_TILCDC, +}; + static LIST_HEAD(module_list); static const u32 tilcdc_rev1_formats[] = { DRM_FORMAT_RGB565 }; @@ -198,6 +203,7 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct device_node *node = dev->of_node; struct tilcdc_drm_private *priv; + enum tilcdc_variant variant; u32 bpp = 0; int ret; @@ -209,6 +215,8 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev) if (IS_ERR(ddev)) return PTR_ERR(ddev); + variant = (uintptr_t)of_device_get_match_data(dev); + ddev->dev_private = priv; platform_set_drvdata(pdev, ddev); drm_mode_config_init(ddev); @@ -309,6 +317,11 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev) DBG("Maximum Pixel Clock Value %dKHz", priv->max_pixelclock); + if (variant == DA850_TILCDC) + priv->fifo_th = 16; + else + priv->fifo_th = 8; + ret = tilcdc_crtc_create(ddev); if (ret < 0) { dev_err(dev, "failed to create crtc\n"); @@ -598,8 +611,8 @@ static void tilcdc_pdev_shutdown(struct platform_device *pdev) } static const struct of_device_id tilcdc_of_match[] = { - { .compatible = "ti,am33xx-tilcdc", }, - { .compatible = "ti,da850-tilcdc", }, + { .compatible = "ti,am33xx-tilcdc", .data = (void *)AM33XX_TILCDC}, + { .compatible = "ti,da850-tilcdc", .data = (void *)DA850_TILCDC}, { }, }; MODULE_DEVICE_TABLE(of, tilcdc_of_match); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h index 3aba3a1155ba..79078b4ae739 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h @@ -61,6 +61,8 @@ struct tilcdc_drm_private { */ uint32_t max_width; + u32 fifo_th; + /* Supported pixel formats */ const uint32_t *pixelformats; uint32_t num_pixelformats; From 0ff223d991477fa4677dcb0f1fb00065847e2212 Mon Sep 17 00:00:00 2001 From: "Kory Maincent (TI.com)" 
Date: Fri, 23 Jan 2026 17:12:23 +0100 Subject: [PATCH 085/158] drm/tilcdc: Convert legacy panel binding via DT overlay at boot time To maintain backward compatibility while removing the deprecated tilcdc_panel driver, add a tilcdc_panel_legacy subdriver that converts the legacy "ti,tilcdc,panel" devicetree binding to the standard panel-dpi binding at early boot. The conversion uses an embedded device tree overlay that is applied and modified during subsys_initcall. The process: - Apply embedded overlay to create a tilcdc-panel-dpi node with port/endpoint connections to the LCDC - Copy all properties from the legacy panel node to the new tilcdc-panel-dpi node - Copy display-timings from the legacy panel - Convert legacy panel-info properties (invert-pxl-clk, sync-edge) to standard display timing properties (pixelclk-active, syncclk-active) - Disable the legacy panel by removing its compatible property to prevent the deprecated driver from binding The result is a standard tilcdc-panel-dpi node with proper endpoints and timing properties, allowing the DRM panel infrastructure to work with legacy devicetrees without modification. Other legacy panel-info properties are not migrated as they consistently use default values across all mainline devicetrees and can be hardcoded in the tilcdc driver. This feature is optional via CONFIG_DRM_TILCDC_PANEL_LEGACY and should only be enabled for systems with legacy devicetrees containing "ti,tilcdc,panel" nodes. 
Suggested-by: Tomi Valkeinen Link: https://lore.kernel.org/all/1d9a9269-bfda-4d43-938b-2df6b82b9369@ideasonboard.com/ Reviewed-by: Luca Ceresoli Reviewed-by: Herve Codina Signed-off-by: Kory Maincent (TI.com) Link: https://patch.msgid.link/20260123-feature_tilcdc-v5-5-5a44d2aa3f6f@bootlin.com Signed-off-by: Luca Ceresoli --- drivers/gpu/drm/tilcdc/Kconfig | 16 ++ drivers/gpu/drm/tilcdc/Makefile | 2 + drivers/gpu/drm/tilcdc/tilcdc_panel_legacy.c | 185 ++++++++++++++++++ .../gpu/drm/tilcdc/tilcdc_panel_legacy.dtso | 29 +++ 4 files changed, 232 insertions(+) create mode 100644 drivers/gpu/drm/tilcdc/tilcdc_panel_legacy.c create mode 100644 drivers/gpu/drm/tilcdc/tilcdc_panel_legacy.dtso diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig index 24f9a245ba59..a36e809f984c 100644 --- a/drivers/gpu/drm/tilcdc/Kconfig +++ b/drivers/gpu/drm/tilcdc/Kconfig @@ -14,3 +14,19 @@ config DRM_TILCDC controller, for example AM33xx in beagle-bone, DA8xx, or OMAP-L1xx. This driver replaces the FB_DA8XX fbdev driver. +config DRM_TILCDC_PANEL_LEGACY + bool "Support device tree blobs using TI LCDC Panel binding" + default y + depends on DRM_TILCDC + depends on OF + depends on BACKLIGHT_CLASS_DEVICE + depends on PM + select OF_OVERLAY + select DRM_PANEL_SIMPLE + help + Modifies the live device tree at early boot to convert the legacy + "ti,tilcdc,panel" devicetree node to the standard panel-dpi node. + This allows to maintain backward compatibility for boards which + were using the deprecated tilcdc_panel driver. + If you find "ti,tilcdc,panel"-string from your DTB, you probably + need this. Otherwise you do not. 
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile index f5190477de72..6d6a08b5adf4 100644 --- a/drivers/gpu/drm/tilcdc/Makefile +++ b/drivers/gpu/drm/tilcdc/Makefile @@ -11,3 +11,5 @@ tilcdc-y := \ tilcdc_drv.o obj-$(CONFIG_DRM_TILCDC) += tilcdc.o +obj-$(CONFIG_DRM_TILCDC_PANEL_LEGACY) += tilcdc_panel_legacy.o \ + tilcdc_panel_legacy.dtbo.o diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel_legacy.c b/drivers/gpu/drm/tilcdc/tilcdc_panel_legacy.c new file mode 100644 index 000000000000..37a69b3cf04b --- /dev/null +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel_legacy.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2025 Bootlin + * Author: Kory Maincent + * + * To support the legacy "ti,tilcdc,panel" binding, the devicetree has to + * be transformed to the new panel-dpi binding with the endpoint associated. + */ + +#include +#include +#include +#include + +/* Embedded dtbo symbols created by cmd_wrap_S_dtb in scripts/Makefile.lib */ +extern char __dtbo_tilcdc_panel_legacy_begin[]; +extern char __dtbo_tilcdc_panel_legacy_end[]; + +static int __init +tilcdc_panel_update_prop(struct of_changeset *ocs, struct device_node *node, + char *name, void *val, int length) +{ + struct property *prop; + + prop = kzalloc(sizeof(*prop), GFP_KERNEL); + if (!prop) + return -ENOMEM; + + prop->name = kstrdup(name, GFP_KERNEL); + prop->length = length; + prop->value = kmemdup(val, length, GFP_KERNEL); + if (!prop->name || !prop->value) { + kfree(prop->name); + kfree(prop->value); + kfree(prop); + return -ENOMEM; + } + + return of_changeset_update_property(ocs, node, prop); +} + +static int __init tilcdc_panel_copy_props(struct device_node *old_panel, + struct device_node *new_panel) +{ + struct device_node *old_timing __free(device_node) = NULL; + struct device_node *new_timing __free(device_node) = NULL; + struct device_node *panel_info __free(device_node) = NULL; + struct device_node *child __free(device_node) = NULL; + u32 
invert_pxl_clk = 0, sync_edge = 0; + struct of_changeset ocs; + struct property *prop; + int ret; + + child = of_get_child_by_name(old_panel, "display-timings"); + if (!child) + return -EINVAL; + + /* The default display timing is the one specified as native-mode. + * If no native-mode is specified then the first node is assumed + * to be the native mode. + */ + old_timing = of_parse_phandle(child, "native-mode", 0); + if (!old_timing) { + old_timing = of_get_next_child(child, NULL); + if (!old_timing) + return -EINVAL; + } + + panel_info = of_get_child_by_name(old_panel, "panel-info"); + if (!panel_info) + return -EINVAL; + + of_changeset_init(&ocs); + + /* Copy all panel properties to the new panel node */ + for_each_property_of_node(old_panel, prop) { + if (!strncmp(prop->name, "compatible", sizeof("compatible"))) + continue; + + ret = tilcdc_panel_update_prop(&ocs, new_panel, prop->name, + prop->value, prop->length); + if (ret) + goto destroy_ocs; + } + + new_timing = of_changeset_create_node(&ocs, new_panel, "panel-timing"); + if (!new_timing) { + ret = -ENODEV; + goto destroy_ocs; + } + + /* Copy all panel timing properties to the new panel node */ + for_each_property_of_node(old_timing, prop) { + ret = tilcdc_panel_update_prop(&ocs, new_timing, prop->name, + prop->value, prop->length); + if (ret) + goto destroy_ocs; + } + + /* Look only for these two parameters as all the others are always + * set to default and not related to common DRM properties. 
+ */ + of_property_read_u32(panel_info, "invert-pxl-clk", &invert_pxl_clk); + of_property_read_u32(panel_info, "sync-edge", &sync_edge); + + if (!invert_pxl_clk) { + ret = tilcdc_panel_update_prop(&ocs, new_timing, "pixelclk-active", + &(u32){cpu_to_be32(1)}, sizeof(u32)); + if (ret) + goto destroy_ocs; + } + + if (!sync_edge) { + ret = tilcdc_panel_update_prop(&ocs, new_timing, "syncclk-active", + &(u32){cpu_to_be32(1)}, sizeof(u32)); + if (ret) + goto destroy_ocs; + } + + /* Remove compatible property to avoid any driver compatible match */ + of_changeset_remove_property(&ocs, old_panel, + of_find_property(old_panel, "compatible", NULL)); + + of_changeset_apply(&ocs); + return 0; + +destroy_ocs: + of_changeset_destroy(&ocs); + return ret; +} + +static const struct of_device_id tilcdc_panel_of_match[] __initconst = { + { .compatible = "ti,tilcdc,panel", }, + {}, +}; + +static const struct of_device_id tilcdc_of_match[] __initconst = { + { .compatible = "ti,am33xx-tilcdc", }, + { .compatible = "ti,da850-tilcdc", }, + {}, +}; + +static int __init tilcdc_panel_legacy_init(void) +{ + struct device_node *new_panel __free(device_node) = NULL; + struct device_node *panel __free(device_node) = NULL; + struct device_node *lcdc __free(device_node) = NULL; + void *dtbo_start; + u32 dtbo_size; + int ovcs_id; + int ret; + + lcdc = of_find_matching_node(NULL, tilcdc_of_match); + panel = of_find_matching_node(NULL, tilcdc_panel_of_match); + + if (!of_device_is_available(panel) || + !of_device_is_available(lcdc)) + return 0; + + dtbo_start = __dtbo_tilcdc_panel_legacy_begin; + dtbo_size = __dtbo_tilcdc_panel_legacy_end - + __dtbo_tilcdc_panel_legacy_begin; + + ret = of_overlay_fdt_apply(dtbo_start, dtbo_size, &ovcs_id, NULL); + if (ret) + return ret; + + new_panel = of_find_node_by_name(NULL, "tilcdc-panel-dpi"); + if (!new_panel) { + ret = -ENODEV; + goto overlay_remove; + } + + ret = tilcdc_panel_copy_props(panel, new_panel); + if (ret) + goto overlay_remove; + + return 0; + 
+overlay_remove: + of_overlay_remove(&ovcs_id); + return ret; +} + +subsys_initcall(tilcdc_panel_legacy_init); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel_legacy.dtso b/drivers/gpu/drm/tilcdc/tilcdc_panel_legacy.dtso new file mode 100644 index 000000000000..ae71d10f5ec1 --- /dev/null +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel_legacy.dtso @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * DTS overlay for converting ti,tilcdc,panel binding to new binding. + * + * Copyright (C) 2025 Bootlin + * Author: Kory Maincent + */ + +/dts-v1/; +/plugin/; + +&{/} { + tilcdc-panel-dpi { + compatible = "panel-dpi"; + port { + panel_in: endpoint@0 { + remote-endpoint = <&lcd_0>; + }; + }; + }; +}; + +&lcdc { + port { + lcd_0: endpoint@0 { + remote-endpoint = <&panel_in>; + }; + }; +}; From 8f1e1ab9c794a6efa7ab2b076405b4984319efec Mon Sep 17 00:00:00 2001 From: "Kory Maincent (TI.com)" Date: Fri, 23 Jan 2026 17:12:24 +0100 Subject: [PATCH 086/158] drm/tilcdc: Remove tilcdc panel driver The tilcdc panel subdriver is a legacy, non-standard driver that has been replaced by the standard panel-dpi driver and panel-simple infrastructure. With the device tree bindings removed and all in-tree users migrated to use panel-dpi, this driver no longer has any associated device tree bindings or users. The panel-dpi driver combined with DRM bus flags provides equivalent functionality in a standard way that is compatible with the broader DRM panel ecosystem. This removal eliminates 400+ lines of redundant code and completes the migration to standard panel handling. 
Reviewed-by: Luca Ceresoli Signed-off-by: Kory Maincent (TI.com) Link: https://patch.msgid.link/20260123-feature_tilcdc-v5-6-5a44d2aa3f6f@bootlin.com Signed-off-by: Luca Ceresoli --- drivers/gpu/drm/tilcdc/Makefile | 1 - drivers/gpu/drm/tilcdc/tilcdc_drv.c | 3 - drivers/gpu/drm/tilcdc/tilcdc_panel.c | 408 -------------------------- drivers/gpu/drm/tilcdc/tilcdc_panel.h | 15 - 4 files changed, 427 deletions(-) delete mode 100644 drivers/gpu/drm/tilcdc/tilcdc_panel.c delete mode 100644 drivers/gpu/drm/tilcdc/tilcdc_panel.h diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile index 6d6a08b5adf4..b78204a65ce2 100644 --- a/drivers/gpu/drm/tilcdc/Makefile +++ b/drivers/gpu/drm/tilcdc/Makefile @@ -6,7 +6,6 @@ endif tilcdc-y := \ tilcdc_plane.o \ tilcdc_crtc.o \ - tilcdc_panel.o \ tilcdc_external.o \ tilcdc_drv.o diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index fe01f3fcaf3c..f03861ed6349 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -28,7 +28,6 @@ #include "tilcdc_drv.h" #include "tilcdc_external.h" -#include "tilcdc_panel.h" #include "tilcdc_regs.h" enum tilcdc_variant { @@ -634,7 +633,6 @@ static int __init tilcdc_drm_init(void) return -ENODEV; DBG("init"); - tilcdc_panel_init(); return platform_driver_register(&tilcdc_platform_driver); } @@ -642,7 +640,6 @@ static void __exit tilcdc_drm_fini(void) { DBG("fini"); platform_driver_unregister(&tilcdc_platform_driver); - tilcdc_panel_fini(); } module_init(tilcdc_drm_init); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c deleted file mode 100644 index 262f290d85d9..000000000000 --- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c +++ /dev/null @@ -1,408 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2012 Texas Instruments - * Author: Rob Clark - */ - -#include -#include -#include - -#include