linux/drivers/gpu/drm/msm/msm_gpu.c
Linus Torvalds 6dfafbd029 Merge tag 'drm-next-2025-12-03' of https://gitlab.freedesktop.org/drm/kernel
Pull drm updates from Dave Airlie:
 "There was a rather late merge of a new color pipeline feature, that
  some userspace projects are blocked on, and has seen a lot of work in
  amdgpu. This should have seen some time in -next. There is additional
  support for this for Intel, that if it arrives in the next day or two
  I'll pass it on in another pull request and you can decide if you want
  to take it.

  Highlights:
   - Arm Ethos NPU accelerator driver
   - new DRM color pipeline support
   - amdgpu will now run discrete SI/CIK cards instead of radeon, which
     enables vulkan support in userspace
   - msm gets gen8 gpu support
   - initial Xe3P support in xe

  Full detail summary:

  New driver:
   - Arm Ethos-U65/U85 accel driver

  Core:
   - support the drm color pipeline in vkms/amdgpu
   - add support for drm colorop pipeline
   - add COLOR PIPELINE plane property
   - add DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE
   - throttle dirty worker with vblank
   - use drm_for_each_bridge_in_chain_scoped in drm's bridge code
   - Ensure drm_client_modeset tests are enabled in UML
   - add simulated vblank interrupt - use in drivers
   - dumb buffer sizing helper
   - move freeing of drm client memory to driver
   - crtc sharpness strength property
   - stop using system_wq in scheduler/drivers
   - support emergency restore in drm-client

  Rust:
   - make slice::as_flattened usable on all supported rustc
   - add FromBytes::from_bytes_prefix() method
   - remove redundant device ptr from Rust GEM object
   - Change how AlwaysRefCounted is implemented for GEM objects

  gpuvm:
   - Add deferred vm_bo cleanup to GPUVM (for rust)

  atomic:
   - cleanup and improve state handling interfaces

  buddy:
   - optimize block management

  dma-buf:
   - heaps: Create heap per CMA reserved location
   - improve userspace documentation

  dp:
   - add POST_LT_ADJ_REQ training sequence
   - DPCD DSC quirk for Synaptics Panamera devices
   - helpers to query branch DSC max throughput

  ttm:
   - Rename ttm_bo_put to ttm_bo_fini
   - allow page protection flags on RISC-V
   - rework pipelined eviction fence handling

  amdgpu:
   - enable amdgpu by default for SI/CI dGPUs
   - enable DC by default on SI
   - refactor CIK/SI enablement
   - add ABM KMS property
   - Re-enable DM idle optimizations
   - DC Analog encoders support
   - Powerplay fixes for fiji/iceland
   - Enable DC on bonaire by default
   - HMM cleanup
   - Add new RAS framework
   - DML2.1 updates
   - YCbCr420 fixes
   - DC FP fixes
   - DMUB fixes
   - LTTPR fixes
   - DTBCLK fixes
   - DMU cursor offload handling
   - Userq validation improvements
   - Unify shutdown callback handling
   - Suspend improvements
   - Power limit code cleanup
   - SR-IOV fixes
   - AUX backlight fixes
   - DCN 3.5 fixes
   - HDMI compliance fixes
   - DCN 4.0.1 cursor updates
   - DCN interrupt fix
   - DC KMS full update improvements
   - Add additional HDCP traces
   - DCN 3.2 fixes
   - DP MST fixes
   - Add support for new SR-IOV mailbox interface
   - UQ reset support
   - HDP flush rework
   - VCE1 support

  amdkfd:
   - HMM cleanups
   - Relax checks on save area overallocations
   - Fix GPU mappings after prefetch

  radeon:
   - refactor CIK/SI enablement

  xe:
   - Initial Xe3P support
   - panic support on VRAM for display
   - fix stolen size check
   - Loosen used tracking restriction
   - New SR-IOV debugfs structure and debugfs updates
   - Hide the GPU madvise flag behind a VM_BIND flag
   - Always expose VRAM provisioning data on discrete GPUs
   - Allow VRAM mappings for userptr when used with SVM
   - Allow pinning of p2p dma-buf
   - Use per-tile debugfs where appropriate
   - Add documentation for Execution Queues
   - PF improvements
   - VF migration recovery redesign work
   - User / Kernel VRAM partitioning
   - Update Tile-based messages
   - Allow configfs to disable specific GT types
   - VF provisioning and migration improvements
   - use SVM range helpers in PT layer
   - Initial CRI support
   - access VF registers using dedicated MMIO view
   - limit number of jobs per exec queue
   - add sriov_admin sysfs tree
   - more Crescent Island specific support
   - debugfs residency counter
   - SRIOV migration work
   - runtime registers for GFX 35

  i915:
   - add initial Xe3p_LPD display version 35 support
   - Enable LNL+ content adaptive sharpness filter
   - Use optimized VRR guardband
   - Enable Xe3p LT PHY
   - enable FBC support for Xe3p_LPD display
   - add display 30.02 firmware support
   - refactor SKL+ watermark latency setup
   - refactor fbdev handling
   - call i915/xe runtime PM via function pointers
   - refactor i915/xe stolen memory/display interfaces
   - use display version instead of gfx version in display code
   - extend i915_display_info with Type-C port details
   - lots of display cleanups/refactorings
   - set O_LARGEFILE in __create_shmem
   - skip GuC communication warning on reset
   - fix time conversions
   - defeature DRRS on LNL+
   - refactor intel_frontbuffer split between i915/xe/display
   - convert intel_rom interfaces to struct drm_device
   - unify display register polling interfaces
   - avoid lock inversion when pinning to GGTT on CHV/BXT+VTD

  panel:
   - Add KD116N3730A08/A12, chromebook mt8189
   - JT101TM023, LQ079L1SX01,
   - GLD070WX3-SL01 MIPI DSI
   - Samsung LTL106AL0, Samsung LTL106AL01
   - Raystar RFF500F-AWH-DNN
   - Winstar WF70A8SYJHLNGA
   - Wanchanglong w552946aaa
   - Samsung SOFEF00
   - Lenovo X13s panel
   - ilitek-ili9881c - add rpi 5" support
   - visionx-rm69299 - add backlight support
   - edp - support AUO B116XAN02.0

  bridge:
   - improve ref counting
   - ti-sn65dsi86 - add support for DP mode with HPD
   - synopsys: support CEC, init timer with correct freq
   - ASL CS5263 DP-to-HDMI bridge support

  nova-core:
   - introduce bitfield! macro
   - introduce safe integer converters
   - GSP inits to fully booted state on Ampere
   - Use more future-proof register for GPU identification

  nova-drm:
   - select NOVA_CORE
   - 64-bit only

  nouveau:
   - improve reclocking on tegra 186+
   - add large page and compression support

  msm:
   - GPU:
      - Gen8 support: A840 (Kaanapali) and X2-85 (Glymur)
      - A612 support
   - MDSS:
      - Added support for Glymur and QCS8300 platforms
   - DPU:
      - Enabled Quad-Pipe support, unlocking higher resolutions support
      - Added support for Glymur platform
      - Documented DPU on QCS8300 platform as supported
   - DisplayPort:
      - Added support for Glymur platform
      - Added support for lane remapping inside DP block
      - Documented DisplayPort controller on QCS8300 and SM6150/QCS615
        as supported

  tegra:
   - NVJPG driver

  panfrost:
   - display JM contexts over debugfs
   - export JM contexts to userspace
   - improve error and job handling

  panthor:
   - support custom ASN_HASH for mt8196
   - support Mali-G1 GPU
   - flush shmem write before mapping buffers uncached
   - make timeout per-queue instead of per-job

  mediatek:
   - MT8195/88 HDMIv2/DDCv2 support

  rockchip:
   - dsi: add support for RK3368

  amdxdna:
   - enhance runtime PM
   - last hardware error reading uapi
   - support firmware debug output
   - add resource and telemetry data uapi
   - preemption support

  imx:
   - add driver for HDMI TX Parallel audio interface

  ivpu:
   - add support for user-managed preemption buffer
   - add userptr support
   - update JSM firmware API to 3.33.0
   - add better alloc/free warnings
   - fix page fault in unbind all bos
   - rework bind/unbind of imported buffers
   - enable MCA ECC signalling
   - split fw runtime and global memory buffers
   - add fdinfo memory statistics

  tidss:
   - convert to drm logging
   - logging cleanup

  ast:
   - refactor generation init paths
   - add per chip generation detect_tx_chip
   - set quirks for each chip model

  atmel-hlcdc:
   - set LCDC_ATTRE register in plane disable
   - set correct values for plane scaler

  solomon:
   - use drm helpers for get_modes and mode_valid

  sitronix:
   - fix output position when clearing screens

  qaic:
   - support dma-buf exports
   - support new firmware's READ_DATA implementation
   - sahara AIC200 image table update
   - add sysfs support
   - add coredump support
   - add uevents support
   - PM support

  sun4i:
   - layer refactors to decouple plane from output
   - improve DE33 support

  vc4:
   - switch to generic CEC helpers

  komeda:
   - use drm_ logging functions

  vkms:
   - configfs support for display configuration

  vgem:
   - fix fence timer deadlock

  etnaviv:
   - add HWDB entry for GC8000 Nano Ultra VIP r6205"

* tag 'drm-next-2025-12-03' of https://gitlab.freedesktop.org/drm/kernel: (1869 commits)
  Revert "drm/amd: Skip power ungate during suspend for VPE"
  drm/amdgpu: use common defines for HUB faults
  drm/amdgpu/gmc12: add amdgpu_vm_handle_fault() handling
  drm/amdgpu/gmc11: add amdgpu_vm_handle_fault() handling
  drm/amdgpu: use static ids for ACP platform devs
  drm/amdgpu/sdma6: Update SDMA 6.0.3 FW version to include UMQ protected-fence fix
  drm/amdgpu: Forward VMID reservation errors
  drm/amdgpu/gmc8: Delegate VM faults to soft IRQ handler ring
  drm/amdgpu/gmc7: Delegate VM faults to soft IRQ handler ring
  drm/amdgpu/gmc6: Delegate VM faults to soft IRQ handler ring
  drm/amdgpu/gmc6: Cache VM fault info
  drm/amdgpu/gmc6: Don't print MC client as it's unknown
  drm/amdgpu/cz_ih: Enable soft IRQ handler ring
  drm/amdgpu/tonga_ih: Enable soft IRQ handler ring
  drm/amdgpu/iceland_ih: Enable soft IRQ handler ring
  drm/amdgpu/cik_ih: Enable soft IRQ handler ring
  drm/amdgpu/si_ih: Enable soft IRQ handler ring
  drm/amd/display: fix typo in display_mode_core_structs.h
  drm/amd/display: fix Smart Power OLED not working after S4
  drm/amd/display: Move RGB-type check for audio sync to DCE HW sequence
  ...
2025-12-04 08:53:30 -08:00


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "drm/drm_drv.h"

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
//#include "adreno/adreno_gpu.h"

#include <generated/utsrelease.h>

#include <linux/string_helpers.h>
#include <linux/devcoredump.h>
#include <linux/sched/task.h>

/*
 * Power Management:
 */

static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}
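
/*
 * The core clock is ramped through the OPP framework rather than a bare
 * clk_set_rate(), so any regulator or bandwidth requirements described
 * by the OPP table are applied along with the frequency.
 */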
static int enable_clk(struct msm_gpu *gpu)
{
	if (gpu->core_clk && gpu->fast_rate)
		dev_pm_opp_set_rate(&gpu->pdev->dev, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2Mhz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}

static int disable_clk(struct msm_gpu *gpu)
{
	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		dev_pm_opp_set_rate(&gpu->pdev->dev, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	return clk_prepare_enable(gpu->ebi1_clk);
}

static int disable_axi(struct msm_gpu *gpu)
{
	clk_disable_unprepare(gpu->ebi1_clk);
	return 0;
}
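
/*
 * Power up in dependency order: rails first, then core clocks, then the
 * AXI bus clock; msm_gpu_pm_suspend() tears this down in reverse order.
 */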
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_resume(0);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	msm_devfreq_resume(gpu);

	gpu->needs_hw_init = true;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_suspend(0);

	msm_devfreq_suspend(gpu);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	gpu->suspend_count++;
	return 0;
}
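
/*
 * Reported via /proc/<pid>/fdinfo on the drm fd; the drm-engine-gpu /
 * drm-cycles-gpu / drm-maxfreq-gpu keys follow the common DRM fdinfo
 * (drm-usage-stats) format so generic tools can aggregate per-client
 * GPU usage.
 */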
void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_context *ctx,
			 struct drm_printer *p)
{
	drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
	drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles);
	drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate);
}

int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->lock));

	if (!gpu->needs_hw_init)
		return 0;
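
	/*
	 * Hold off IRQ handling while hw_init reprograms the hardware, so a
	 * stale or spurious interrupt can't race with the init sequence.
	 */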
	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
		size_t count, void *data, size_t datalen)
{
	struct msm_gpu *gpu = data;
	struct drm_print_iterator iter;
	struct drm_printer p;
	struct msm_gpu_state *state;

	state = msm_gpu_crashstate_get(gpu);
	if (!state)
		return 0;
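
	/*
	 * The coredump printer regenerates the dump on every read, but only
	 * the window of 'count' bytes starting at 'offset' is copied out, so
	 * userspace can read the devcoredump in chunks.
	 */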
	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "---\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %ptSp\n", &state->time);
	if (state->comm)
		drm_printf(&p, "comm: %s\n", state->comm);
	if (state->cmd)
		drm_printf(&p, "cmdline: %s\n", state->cmd);

	gpu->funcs->show(gpu, state, &p);

	msm_gpu_crashstate_put(gpu);

	return count - iter.remain;
}

static void msm_gpu_devcoredump_free(void *data)
{
	struct msm_gpu *gpu = data;

	msm_gpu_crashstate_put(gpu);
}

static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
		struct drm_gem_object *obj, u64 iova,
		bool full, size_t offset, size_t size)
{
	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	/* Don't record write only objects */
	state_bo->size = size;
	state_bo->flags = msm_obj->flags;
	state_bo->iova = iova;

	BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(msm_obj->name));

	memcpy(state_bo->name, msm_obj->name, sizeof(state_bo->name));

	if (full) {
		void *ptr;

		state_bo->data = kvmalloc(size, GFP_KERNEL);
		if (!state_bo->data)
			goto out;

		ptr = msm_gem_get_vaddr_active(obj);
		if (IS_ERR(ptr)) {
			kvfree(state_bo->data);
			state_bo->data = NULL;
			goto out;
		}

		memcpy(state_bo->data, ptr + offset, size);
		msm_gem_put_vaddr_locked(obj);
	}
out:
	state->nr_bos++;
}
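
/*
 * For VM_BIND (userspace-managed VM) contexts, snapshot every mapping in
 * the VM, locking the backing GEM objects with drm_exec; for legacy
 * submits, only the BOs referenced by the submit itself are captured.
 */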
static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submit *submit)
{
	extern bool rd_full;

	if (msm_context_is_vmbind(submit->queue->ctx)) {
		struct drm_exec exec;
		struct drm_gpuva *vma;
		unsigned cnt = 0;

		drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
		drm_exec_until_all_locked(&exec) {
			cnt = 0;

			drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(submit->vm));
			drm_exec_retry_on_contention(&exec);

			drm_gpuvm_for_each_va (vma, submit->vm) {
				if (!vma->gem.obj)
					continue;

				cnt++;
				drm_exec_lock_obj(&exec, vma->gem.obj);
				drm_exec_retry_on_contention(&exec);
			}
		}

		drm_gpuvm_for_each_va (vma, submit->vm)
			cnt++;

		state->bos = kcalloc(cnt, sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		if (state->bos)
			drm_gpuvm_for_each_va (vma, submit->vm) {
				bool dump = rd_full || (vma->flags & MSM_VMA_DUMP);

				/* Skip MAP_NULL/PRR VMAs: */
				if (!vma->gem.obj)
					continue;

				msm_gpu_crashstate_get_bo(state, vma->gem.obj, vma->va.addr,
							  dump, vma->gem.offset, vma->va.range);
			}

		drm_exec_fini(&exec);
	} else {
		state->bos = kcalloc(submit->nr_bos,
			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		for (int i = 0; state->bos && i < submit->nr_bos; i++) {
			struct drm_gem_object *obj = submit->bos[i].obj;
			bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);

			msm_gem_lock(obj);
			msm_gpu_crashstate_get_bo(state, obj, submit->bos[i].iova,
						  dump, 0, obj->size);
			msm_gem_unlock(obj);
		}
	}
}
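
/*
 * Snapshot the VM operations log, oldest entry first, so the devcoredump
 * can show the most recent map/unmap activity on the faulting VM.
 */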
static void crashstate_get_vm_logs(struct msm_gpu_state *state, struct msm_gem_vm *vm)
{
	uint32_t vm_log_len = (1 << vm->log_shift);
	uint32_t vm_log_mask = vm_log_len - 1;
	int first;

	/* Bail if no log, or empty log: */
	if (!vm->log || !vm->log[0].op)
		return;

	mutex_lock(&vm->mmu_lock);

	/*
	 * log_idx is the next entry to overwrite, meaning it is the oldest, or
	 * first, entry (other than the special case handled below where the
	 * log hasn't wrapped around yet)
	 */
	first = vm->log_idx;

	if (!vm->log[first].op) {
		/*
		 * If the next log entry has not been written yet, then only
		 * entries 0 to idx-1 are valid (ie. we haven't wrapped around
		 * yet)
		 */
		state->nr_vm_logs = MAX(0, first - 1);
		first = 0;
	} else {
		state->nr_vm_logs = vm_log_len;
	}

	state->vm_logs = kmalloc_array(
		state->nr_vm_logs, sizeof(vm->log[0]), GFP_KERNEL);
	if (!state->vm_logs) {
		state->nr_vm_logs = 0;
	}

	for (int i = 0; i < state->nr_vm_logs; i++) {
		int idx = (i + first) & vm_log_mask;

		state->vm_logs[i] = vm->log[idx];
	}

	mutex_unlock(&vm->mmu_lock);
}

static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
		char *comm, char *cmd)
{
	struct msm_gpu_state *state;

	/* Check if the target supports capturing crash state */
	if (!gpu->funcs->gpu_state_get)
		return;

	/* Only save one crash state at a time */
	if (gpu->crashstate)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

	/* Fill in the additional crash state information */
	state->comm = kstrdup(comm, GFP_KERNEL);
	state->cmd = kstrdup(cmd, GFP_KERNEL);

	if (fault_info)
		state->fault_info = *fault_info;

	if (submit && state->fault_info.ttbr0) {
		struct msm_gpu_fault_info *info = &state->fault_info;
		struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu;

		msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
					   &info->asid);
		msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);
	}

	if (submit) {
		crashstate_get_vm_logs(state, to_msm_vm(submit->vm));
		crashstate_get_bos(state, submit);
	}

	/* Set the active crash state to be dumped on failure */
	gpu->crashstate = state;

	dev_coredumpm(&gpu->pdev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
		      msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
		char *comm, char *cmd)
{
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */
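
/* Find the in-flight submit with the given ring seqno, if it hasn't retired yet */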
static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;
	unsigned long flags;

	spin_lock_irqsave(&ring->submit_lock, flags);

	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno == fence) {
			spin_unlock_irqrestore(&ring->submit_lock, flags);
			return submit;
		}
	}
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	return NULL;
}

static void retire_submits(struct msm_gpu *gpu);

static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
{
	struct msm_context *ctx = submit->queue->ctx;
	struct task_struct *task;

	WARN_ON(!mutex_is_locked(&submit->gpu->lock));

	/* Note that kstrdup will return NULL if argument is NULL: */
	*comm = kstrdup(ctx->comm, GFP_KERNEL);
	*cmd = kstrdup(ctx->cmdline, GFP_KERNEL);

	task = get_pid_task(submit->pid, PIDTYPE_PID);
	if (!task)
		return;

	if (!*comm)
		*comm = kstrdup(task->comm, GFP_KERNEL);

	if (!*cmd)
		*cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);

	put_task_struct(task);
}
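
/*
 * Recover from a hang: capture crash state for the guilty submit, fast
 * forward the fences past it, reset the GPU if it is still active, and
 * then replay the remaining innocent submits.
 */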
static void recover_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	struct task_struct *task;
	int i;

	mutex_lock(&gpu->lock);

	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);

	/*
	 * If the submit retired while we were waiting for the worker to run,
	 * or waiting to acquire the gpu lock, then nothing more to do.
	 */
	if (!submit)
		goto out_unlock;

	/* Increment the fault counts */
	submit->queue->faults++;

	task = get_pid_task(submit->pid, PIDTYPE_PID);
	if (!task) {
		gpu->global_faults++;
	} else {
		struct msm_gem_vm *vm = to_msm_vm(submit->vm);

		vm->faults++;

		/*
		 * If userspace has opted-in to VM_BIND (and therefore userspace
		 * management of the VM), faults mark the VM as unusable. This
		 * matches vulkan expectations (vulkan is the main target for
		 * VM_BIND).
		 */
		if (!vm->managed)
			msm_gem_vm_unusable(submit->vm);
	}

	get_comm_cmdline(submit, &comm, &cmd);
	if (comm && cmd) {
		DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
			      gpu->name, comm, cmd);

		msm_rd_dump_submit(priv->hangrd, submit,
				   "offending task: %s (%s)", comm, cmd);
	} else {
		DRM_DEV_ERROR(dev->dev, "%s: offending task: unknown\n", gpu->name);

		msm_rd_dump_submit(priv->hangrd, submit, NULL);
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd);

	kfree(cmd);
	kfree(comm);

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];
		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			ring->memptrs->fence = ++fence;

		msm_update_fence(ring->fctx, fence);
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		gpu->funcs->recover(gpu);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];
			unsigned long flags;

			spin_lock_irqsave(&ring->submit_lock, flags);
			list_for_each_entry(submit, &ring->submits, node) {
				/*
				 * If the submit uses an unusable vm make sure
				 * we don't actually run it
				 */
				if (to_msm_vm(submit->vm)->unusable)
					submit->nr_cmds = 0;

				gpu->funcs->submit(gpu, submit);
			}
			spin_unlock_irqrestore(&ring->submit_lock, flags);
		}
	}

	pm_runtime_put(&gpu->pdev->dev);

out_unlock:
	mutex_unlock(&gpu->lock);

	msm_gpu_retire(gpu);
}

void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info)
{
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;

	mutex_lock(&gpu->lock);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit && submit->fault_dumped)
		goto resume_smmu;

	if (submit) {
		get_comm_cmdline(submit, &comm, &cmd);

		/*
		 * When we get GPU iova faults, we can get 1000s of them,
		 * but we really only want to log the first one.
		 */
		submit->fault_dumped = true;
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	kfree(cmd);
	kfree(comm);

resume_smmu:
	mutex_unlock(&gpu->lock);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;

	mod_timer(&gpu->hangcheck_timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
}
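
/*
 * If the target implements a progress check, grant the ring a few extra
 * hangcheck periods while the CP is still consuming commands, before
 * declaring the GPU hung.
 */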
static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	if (ring->hangcheck_progress_retries >= DRM_MSM_HANGCHECK_PROGRESS_RETRIES)
		return false;

	if (!gpu->funcs->progress)
		return false;

	if (!gpu->funcs->progress(gpu, ring))
		return false;

	ring->hangcheck_progress_retries++;
	return true;
}

static void hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = timer_container_of(gpu, t, hangcheck_timer);
	struct drm_device *dev = gpu->dev;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
		ring->hangcheck_progress_retries = 0;
	} else if (fence_before(fence, ring->fctx->last_fence) &&
			!made_progress(gpu, ring)) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		ring->hangcheck_progress_retries = 0;
		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
			      gpu->name, ring->id);
		DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
			      gpu->name, fence);
		DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
			      gpu->name, ring->fctx->last_fence);

		kthread_queue_work(gpu->worker, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	msm_gpu_retire(gpu);
}

/*
 * Performance Counters:
 */

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

/*
 * Cmdstream submission/retirement:
 */

static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		struct msm_gem_submit *submit)
{
	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	volatile struct msm_gpu_submit_stats *stats;
	u64 elapsed, clock = 0, cycles;
	unsigned long flags;

	stats = &ring->memptrs->stats[index];
	/* Convert 19.2Mhz alwayson ticks to nanoseconds for elapsed time */
	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
	do_div(elapsed, 192);

	cycles = stats->cpcycles_end - stats->cpcycles_start;

	/* Calculate the clock frequency from the number of CP cycles */
	if (elapsed) {
		clock = cycles * 1000;
		do_div(clock, elapsed);
	}
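
	/*
	 * 'elapsed' is in nanoseconds and 'clock' in MHz (CP cycles per
	 * microsecond); the running totals feed the fdinfo stats reported
	 * by msm_gpu_show_fdinfo().
	 */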
	submit->queue->ctx->elapsed_ns += elapsed;
	submit->queue->ctx->cycles += cycles;

	trace_msm_gpu_submit_retired(submit, elapsed, clock,
		stats->alwayson_start, stats->alwayson_end);

	msm_submit_retire(submit);

	pm_runtime_mark_last_busy(&gpu->pdev->dev);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_del(&submit->node);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	/* Update devfreq on transition from active->idle: */
	mutex_lock(&gpu->active_lock);
	gpu->active_submits--;
	WARN_ON(gpu->active_submits < 0);
	if (!gpu->active_submits) {
		msm_devfreq_idle(gpu);
		pm_runtime_put_autosuspend(&gpu->pdev->dev);
	}
	mutex_unlock(&gpu->active_lock);

	msm_gem_submit_put(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	int i;

	/* Retire the commits starting with highest priority */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		while (true) {
			struct msm_gem_submit *submit = NULL;
			unsigned long flags;

			spin_lock_irqsave(&ring->submit_lock, flags);
			submit = list_first_entry_or_null(&ring->submits,
				struct msm_gem_submit, node);
			spin_unlock_irqrestore(&ring->submit_lock, flags);

			/*
			 * If no submit, we are done.  If submit->fence hasn't
			 * been signalled, then later submits are not signalled
			 * either, so we are also done.
			 */
			if (submit && dma_fence_is_signaled(submit->hw_fence)) {
				retire_submit(gpu, ring, submit);
			} else {
				break;
			}
		}
	}

	wake_up_all(&gpu->retire_event);
}

static void retire_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);

	retire_submits(gpu);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);

	kthread_queue_work(gpu->worker, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct msm_ringbuffer *ring = submit->ring;
	unsigned long flags;

	WARN_ON(!mutex_is_locked(&gpu->lock));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = submit->hw_fence->seqno;

	update_sw_cntrs(gpu);

	/*
	 * ring->submits holds a ref to the submit, to deal with the case
	 * that a submit completes before msm_ioctl_gem_submit() returns.
	 */
	msm_gem_submit_get(submit);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_add_tail(&submit->node, &ring->submits);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	/* Update devfreq on transition from idle->active: */
	mutex_lock(&gpu->active_lock);
	if (!gpu->active_submits) {
		pm_runtime_get(&gpu->pdev->dev);
		msm_devfreq_active(gpu);
	}
	gpu->active_submits++;
	mutex_unlock(&gpu->active_lock);

	gpu->funcs->submit(gpu, submit);

	submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno;

	pm_runtime_put(&gpu->pdev->dev);

	hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}
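
/*
 * devm_clk_bulk_get_all() returns the number of clocks found (or -errno);
 * "core" and "rbbmtimer" are then looked up by name within the bulk set
 * so their rates can be controlled individually.
 */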
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);

	if (ret < 1) {
		gpu->nr_clocks = 0;
		return ret;
	}

	gpu->nr_clocks = ret;

	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "core");

	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "rbbmtimer");

	return 0;
}

/* Return a new address space for a msm_drm_private instance */
struct drm_gpuvm *
msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
			  bool kernel_managed)
{
	struct drm_gpuvm *vm = NULL;

	if (!gpu)
		return NULL;

	/*
	 * If the target doesn't support private address spaces then return
	 * the global one
	 */
	if (gpu->funcs->create_private_vm) {
		vm = gpu->funcs->create_private_vm(gpu, kernel_managed);
		if (!IS_ERR(vm))
			to_msm_vm(vm)->pid = get_pid(task_pid(task));
	}

	if (IS_ERR_OR_NULL(vm))
		vm = drm_gpuvm_get(gpu->vm);

	return vm;
}

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	struct msm_drm_private *priv = drm->dev_private;
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	gpu->worker = kthread_run_worker(0, "gpu-worker");
	if (IS_ERR(gpu->worker)) {
		ret = PTR_ERR(gpu->worker);
		gpu->worker = NULL;
		goto fail;
	}

	sched_set_fifo_low(gpu->worker->task);

	mutex_init(&gpu->active_lock);
	mutex_init(&gpu->lock);
	init_waitqueue_head(&gpu->retire_event);
	kthread_init_work(&gpu->retire_work, retire_worker);
	kthread_init_work(&gpu->recover_work, recover_worker);

	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;

	/*
	 * If progress detection is supported, halve the hangcheck timer
	 * duration, as it takes two iterations of the hangcheck handler
	 * to detect a hang.
	 */
	if (funcs->progress)
		priv->hangcheck_period /= 2;

	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			       IRQF_TRIGGER_HIGH, "gpu-irq", gpu);
	if (ret) {
		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	platform_set_drvdata(pdev, &gpu->adreno_smmu);

	msm_devfreq_init(gpu);

	gpu->vm = gpu->funcs->create_vm(gpu, pdev);
	if (IS_ERR(gpu->vm)) {
		ret = PTR_ERR(gpu->vm);
		goto fail;
	}

	memptrs = msm_gem_kernel_new(drm,
		sizeof(struct msm_rbmemptrs) * nr_rings,
		check_apriv(gpu, MSM_BO_WC), gpu->vm, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			DRM_DEV_ERROR(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	refcount_set(&gpu->sysprof_active, 1);

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);

	platform_set_drvdata(pdev, NULL);
	return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);

	if (!IS_ERR_OR_NULL(gpu->vm)) {
		struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;

		mmu->funcs->detach(mmu);
		drm_gpuvm_put(gpu->vm);
	}

	if (gpu->worker)
		kthread_destroy_worker(gpu->worker);

	msm_devfreq_cleanup(gpu);

	platform_set_drvdata(gpu->pdev, NULL);
}