Merge tag 'amd-drm-next-7.1-2026-03-25' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-7.1-2026-03-25:

amdgpu:
- DSC fix
- Module parameter parsing fix
- PASID reuse fix
- drm_edid leak fix
- SMU 13.x fixes
- SMU 14.x fix
- Fence fix in amdgpu_amdkfd_submit_ib()
- LVDS fixes
- GPU page fault fix for non-4K pages
- Misc cleanups
- UserQ fixes
- SMU 15.0.8 support
- RAS updates
- Devcoredump fixes
- GFX queue priority fixes
- DPIA fixes
- DCN 4.2 updates
- Add debugfs interface for pcie64 registers
- SMU 15.x fixes
- VCN reset fixes
- Documentation fixes

amdkfd:
- Ordering fix in kfd_ioctl_create_process()

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patch.msgid.link/20260325175012.4185721-1-alexander.deucher@amd.com
This commit is contained in:
Dave Airlie
2026-03-27 09:30:34 +10:00
104 changed files with 4866 additions and 944 deletions

View File

@@ -690,6 +690,7 @@ enum amdgpu_uid_type {
AMDGPU_UID_TYPE_XCD,
AMDGPU_UID_TYPE_AID,
AMDGPU_UID_TYPE_SOC,
AMDGPU_UID_TYPE_MID,
AMDGPU_UID_TYPE_MAX
};

View File

@@ -692,9 +692,9 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
goto err_ib_sched;
}
/* Drop the initial kref_init count (see drm_sched_main as example) */
dma_fence_put(f);
ret = dma_fence_wait(f, false);
/* Drop the returned fence reference after the wait completes */
dma_fence_put(f);
err_ib_sched:
amdgpu_job_free(job);

View File

@@ -617,6 +617,110 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
return r;
}
/**
 * amdgpu_debugfs_regs_pcie64_read - Read from a 64-bit PCIE register
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * Both @size and @*pos must be 8-byte aligned; each loop iteration copies
 * one 64-bit register value to userspace and advances the offset.
 *
 * Returns the number of bytes read on success, or a negative error code.
 */
static ssize_t amdgpu_debugfs_regs_pcie64_read(struct file *f, char __user *buf,
		size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	/* 64-bit accesses require 8-byte aligned size and offset */
	if (size & 0x7 || *pos & 0x7)
		return -EINVAL;

	/* Wake the device; balance the usage count even on failure */
	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint64_t value;

		value = RREG64_PCIE_EXT(*pos);
		r = put_user(value, (uint64_t *)buf);
		if (r)
			goto out;

		result += 8;
		buf += 8;
		*pos += 8;
		size -= 8;
	}

	r = result;
out:
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	amdgpu_virt_disable_access_debugfs(adev);
	return r;
}
/**
 * amdgpu_debugfs_regs_pcie64_write - Write to a 64-bit PCIE register
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos: Offset to seek to
 *
 * Both @size and @*pos must be 8-byte aligned; each loop iteration copies
 * one 64-bit value from userspace into the register at the current offset.
 *
 * Returns the number of bytes written on success, or a negative error code.
 */
static ssize_t amdgpu_debugfs_regs_pcie64_write(struct file *f, const char __user *buf,
		size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	/* 64-bit accesses require 8-byte aligned size and offset */
	if (size & 0x7 || *pos & 0x7)
		return -EINVAL;

	/* Wake the device; balance the usage count even on failure */
	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint64_t value;

		r = get_user(value, (uint64_t *)buf);
		if (r)
			goto out;

		WREG64_PCIE_EXT(*pos, value);

		result += 8;
		buf += 8;
		*pos += 8;
		size -= 8;
	}

	r = result;
out:
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	amdgpu_virt_disable_access_debugfs(adev);
	return r;
}
/**
* amdgpu_debugfs_regs_didt_read - Read from a DIDT register
*
@@ -1525,6 +1629,12 @@ static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
.write = amdgpu_debugfs_regs_pcie_write,
.llseek = default_llseek
};
/* File operations for the "amdgpu_regs_pcie64" debugfs entry (64-bit PCIE registers) */
static const struct file_operations amdgpu_debugfs_regs_pcie64_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie64_read,
	.write = amdgpu_debugfs_regs_pcie64_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_smc_read,
@@ -1587,6 +1697,7 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_gprwave_fops,
&amdgpu_debugfs_regs_didt_fops,
&amdgpu_debugfs_regs_pcie_fops,
&amdgpu_debugfs_regs_pcie64_fops,
&amdgpu_debugfs_regs_smc_fops,
&amdgpu_debugfs_gca_config_fops,
&amdgpu_debugfs_sensors_fops,
@@ -1604,6 +1715,7 @@ static const char * const debugfs_regs_names[] = {
"amdgpu_gprwave",
"amdgpu_regs_didt",
"amdgpu_regs_pcie",
"amdgpu_regs_pcie64",
"amdgpu_regs_smc",
"amdgpu_gca_config",
"amdgpu_sensors",

View File

@@ -3498,7 +3498,8 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
{
char *input = amdgpu_lockup_timeout;
char buf[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
char *input = buf;
char *timeout_setting = NULL;
int index = 0;
long timeout;
@@ -3508,9 +3509,17 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
adev->video_timeout = msecs_to_jiffies(2000);
if (!strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH))
if (!strnlen(amdgpu_lockup_timeout, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH))
return 0;
/*
* strsep() destructively modifies its input by replacing delimiters
* with '\0'. Use a stack copy so the global module parameter buffer
* remains intact for multi-GPU systems where this function is called
* once per device.
*/
strscpy(buf, amdgpu_lockup_timeout, sizeof(buf));
while ((timeout_setting = strsep(&input, ",")) &&
strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
ret = kstrtol(timeout_setting, 0, &timeout);

View File

@@ -324,7 +324,7 @@ static int amdgpu_discovery_get_tmr_info(struct amdgpu_device *adev,
ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
if (ret)
return ret;
adev->discovery.size = (u32)tmr_size;
adev->discovery.size = DISCOVERY_TMR_SIZE;
adev->discovery.offset = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
}
}
@@ -1394,6 +1394,9 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
struct list_head *el, *tmp;
struct kset *die_kset;
if (!ip_top)
return;
die_kset = &ip_top->die_kset;
spin_lock(&die_kset->list_lock);
list_for_each_prev_safe(el, tmp, &die_kset->list) {
@@ -1418,9 +1421,13 @@ void amdgpu_discovery_dump(struct amdgpu_device *adev, struct drm_printer *p)
struct ip_hw_instance *ip_inst;
int i = 0, j;
if (!ip_top)
return;
die_kset = &ip_top->die_kset;
drm_printf(p, "\nHW IP Discovery\n");
spin_lock(&die_kset->list_lock);
list_for_each(el_die, &die_kset->list) {
drm_printf(p, "die %d\n", i++);
@@ -1977,11 +1984,10 @@ static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
uint32_t *nps_type,
struct amdgpu_gmc_memrange **ranges,
struct amdgpu_gmc_memrange *ranges,
int *range_cnt, bool refresh)
{
uint8_t *discovery_bin = adev->discovery.bin;
struct amdgpu_gmc_memrange *mem_ranges;
struct table_info *info;
union nps_info *nps_info;
union nps_info nps_data;
@@ -2019,20 +2025,22 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
switch (le16_to_cpu(nps_info->v1.header.version_major)) {
case 1:
mem_ranges = kvzalloc_objs(*mem_ranges, nps_info->v1.count);
if (!mem_ranges)
return -ENOMEM;
*nps_type = nps_info->v1.nps_type;
if (*range_cnt < nps_info->v1.count) {
dev_dbg(adev->dev,
"not enough space for nps ranges: %d < %d\n",
*range_cnt, nps_info->v1.count);
return -ENOSPC;
}
*range_cnt = nps_info->v1.count;
for (i = 0; i < *range_cnt; i++) {
mem_ranges[i].base_address =
ranges[i].base_address =
nps_info->v1.instance_info[i].base_address;
mem_ranges[i].limit_address =
ranges[i].limit_address =
nps_info->v1.instance_info[i].limit_address;
mem_ranges[i].nid_mask = -1;
mem_ranges[i].flags = 0;
ranges[i].nid_mask = -1;
ranges[i].flags = 0;
}
*ranges = mem_ranges;
break;
default:
dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
@@ -2334,6 +2342,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
break;
case IP_VERSION(15, 0, 0):
case IP_VERSION(15, 0, 8):
amdgpu_device_ip_block_add(adev, &smu_v15_0_ip_block);
break;
default:

View File

@@ -46,7 +46,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
uint32_t *nps_type,
struct amdgpu_gmc_memrange **ranges,
struct amdgpu_gmc_memrange *ranges,
int *range_cnt, bool refresh);
void amdgpu_discovery_dump(struct amdgpu_device *adev, struct drm_printer *p);

View File

@@ -64,10 +64,19 @@ amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
container_of(evf_mgr, struct amdgpu_fpriv, evf_mgr);
struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
struct dma_fence *ev_fence;
bool cookie;
mutex_lock(&uq_mgr->userq_mutex);
/*
* This is intentionally after taking the userq_mutex since we do
* allocate memory while holding this lock, but only after ensuring that
* the eviction fence is signaled.
*/
cookie = dma_fence_begin_signalling();
ev_fence = amdgpu_evf_mgr_get_fence(evf_mgr);
amdgpu_userq_evict(uq_mgr, !evf_mgr->shutdown);
amdgpu_userq_evict(uq_mgr);
/*
* Signaling the eviction fence must be done while holding the
@@ -75,7 +84,12 @@ amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
* next fence.
*/
dma_fence_signal(ev_fence);
dma_fence_end_signalling(cookie);
dma_fence_put(ev_fence);
if (!evf_mgr->shutdown)
schedule_delayed_work(&uq_mgr->resume_work, 0);
mutex_unlock(&uq_mgr->userq_mutex);
}

View File

@@ -438,7 +438,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
* @ring: ring to init the fence driver on
*
* Init the fence driver for the requested ring (all asics).
* Helper function for amdgpu_fence_driver_init().
* Helper function for amdgpu_fence_driver_sw_init().
*/
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{

View File

@@ -30,6 +30,7 @@
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-unwrap.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
@@ -106,6 +107,7 @@ amdgpu_gem_update_timeline_node(struct drm_file *filp,
*chain = dma_fence_chain_alloc();
if (!*chain) {
drm_syncobj_put(*syncobj);
*syncobj = NULL;
return -ENOMEM;
}
@@ -741,11 +743,10 @@ amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct dma_fence *fence;
int r = 0;
/* Always start from the VM's existing last update fence. */
fence = dma_fence_get(vm->last_update);
/* If the VM is not ready return only a stub. */
if (!amdgpu_vm_ready(vm))
return fence;
return dma_fence_get_stub();
/*
* First clean up any freed mappings in the VM.
@@ -754,7 +755,7 @@ amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
* schedules GPU work. If nothing needs clearing, @fence can remain as
* the original vm->last_update.
*/
r = amdgpu_vm_clear_freed(adev, vm, &fence);
r = amdgpu_vm_clear_freed(adev, vm, &vm->last_update);
if (r)
goto error;
@@ -771,47 +772,34 @@ amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r)
goto error;
/*
* Decide which fence best represents the last update:
*
* MAP/REPLACE:
* - For always-valid mappings, use vm->last_update.
* - Otherwise, export bo_va->last_pt_update.
*
* UNMAP/CLEAR:
* Keep the fence returned by amdgpu_vm_clear_freed(). If no work was
* needed, it can remain as vm->last_pt_update.
*
* The VM and BO update fences are always initialized to a valid value.
* vm->last_update and bo_va->last_pt_update always start as valid fences.
* and are never expected to be NULL.
*/
switch (operation) {
case AMDGPU_VA_OP_MAP:
case AMDGPU_VA_OP_REPLACE:
/*
* For MAP/REPLACE, return the page table update fence for the
* mapping we just modified. bo_va is expected to be valid here.
*/
dma_fence_put(fence);
if ((operation == AMDGPU_VA_OP_MAP ||
operation == AMDGPU_VA_OP_REPLACE) &&
!amdgpu_vm_is_bo_always_valid(vm, bo_va->base.bo)) {
if (amdgpu_vm_is_bo_always_valid(vm, bo_va->base.bo))
fence = dma_fence_get(vm->last_update);
else
fence = dma_fence_get(bo_va->last_pt_update);
break;
case AMDGPU_VA_OP_UNMAP:
case AMDGPU_VA_OP_CLEAR:
default:
/* keep @fence as returned by amdgpu_vm_clear_freed() */
break;
/*
* For MAP/REPLACE of non per-VM BOs we need to sync to both the
* bo_va->last_pt_update and vm->last_update or otherwise we
* potentially miss the PDE updates.
*/
fence = dma_fence_unwrap_merge(vm->last_update,
bo_va->last_pt_update);
if (!fence) {
/* As fallback in OOM situations */
dma_fence_wait(vm->last_update, false);
dma_fence_wait(bo_va->last_pt_update, false);
fence = dma_fence_get_stub();
}
} else {
fence = dma_fence_get(vm->last_update);
}
return fence;
error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
return fence;
return dma_fence_get(vm->last_update);
}
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
@@ -832,7 +820,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_bo_va *bo_va;
struct drm_syncobj *timeline_syncobj = NULL;
struct dma_fence_chain *timeline_chain = NULL;
struct dma_fence *fence;
struct drm_exec exec;
uint64_t vm_size;
int r = 0;
@@ -884,6 +871,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
if (args->flags & AMDGPU_VM_DELAY_UPDATE &&
args->vm_timeline_syncobj_out)
return -EINVAL;
if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
!(args->flags & AMDGPU_VM_PAGE_PRT)) {
gobj = drm_gem_object_lookup(filp, args->handle);
@@ -973,11 +964,13 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
* that represents the last relevant update for this mapping. This
* fence can then be exported to the user-visible VM timeline.
*/
if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) {
if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
(!adev->debug_vm || timeline_syncobj)) {
struct dma_fence *fence;
fence = amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
args->operation);
if (timeline_syncobj && fence) {
if (timeline_syncobj) {
if (!args->vm_timeline_point) {
/* Replace the existing fence when no point is given. */
drm_syncobj_replace_fence(timeline_syncobj,
@@ -988,6 +981,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
timeline_chain,
fence,
args->vm_timeline_point);
timeline_chain = NULL;
}
}
dma_fence_put(fence);
@@ -995,6 +989,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
}
error:
dma_fence_chain_free(timeline_chain);
if (timeline_syncobj)
drm_syncobj_put(timeline_syncobj);
drm_exec_fini(&exec);
error_put_gobj:
drm_gem_object_put(gobj);

View File

@@ -1374,18 +1374,18 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
struct amdgpu_mem_partition_info *mem_ranges,
uint8_t *exp_ranges)
{
struct amdgpu_gmc_memrange *ranges;
struct amdgpu_gmc_memrange ranges[AMDGPU_MAX_MEM_RANGES];
int range_cnt, ret, i, j;
uint32_t nps_type;
bool refresh;
if (!mem_ranges || !exp_ranges)
return -EINVAL;
range_cnt = AMDGPU_MAX_MEM_RANGES;
refresh = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) &&
(adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS);
ret = amdgpu_discovery_get_nps_info(adev, &nps_type, &ranges,
&range_cnt, refresh);
ret = amdgpu_discovery_get_nps_info(adev, &nps_type, ranges, &range_cnt,
refresh);
if (ret)
return ret;
@@ -1446,8 +1446,6 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
if (!*exp_ranges)
*exp_ranges = range_cnt;
err:
kvfree(ranges);
return ret;
}

View File

@@ -35,10 +35,13 @@
* PASIDs are global address space identifiers that can be shared
* between the GPU, an IOMMU and the driver. VMs on different devices
* may use the same PASID if they share the same address
* space. Therefore PASIDs are allocated using a global IDA. VMs are
* looked up from the PASID per amdgpu_device.
* space. Therefore PASIDs are allocated using IDR cyclic allocator
* (similar to kernel PID allocation) which naturally delays reuse.
* VMs are looked up from the PASID per amdgpu_device.
*/
static DEFINE_IDA(amdgpu_pasid_ida);
static DEFINE_IDR(amdgpu_pasid_idr);
static DEFINE_SPINLOCK(amdgpu_pasid_idr_lock);
/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
@@ -50,8 +53,8 @@ struct amdgpu_pasid_cb {
* amdgpu_pasid_alloc - Allocate a PASID
* @bits: Maximum width of the PASID in bits, must be at least 1
*
* Allocates a PASID of the given width while keeping smaller PASIDs
* available if possible.
* Uses kernel's IDR cyclic allocator (same as PID allocation).
* Allocates sequentially with automatic wrap-around.
*
* Returns a positive integer on success. Returns %-EINVAL if bits==0.
* Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
@@ -59,14 +62,15 @@ struct amdgpu_pasid_cb {
*/
int amdgpu_pasid_alloc(unsigned int bits)
{
int pasid = -EINVAL;
int pasid;
for (bits = min(bits, 31U); bits > 0; bits--) {
pasid = ida_alloc_range(&amdgpu_pasid_ida, 1U << (bits - 1),
(1U << bits) - 1, GFP_KERNEL);
if (pasid != -ENOSPC)
break;
}
if (bits == 0)
return -EINVAL;
spin_lock(&amdgpu_pasid_idr_lock);
pasid = idr_alloc_cyclic(&amdgpu_pasid_idr, NULL, 1,
1U << bits, GFP_KERNEL);
spin_unlock(&amdgpu_pasid_idr_lock);
if (pasid >= 0)
trace_amdgpu_pasid_allocated(pasid);
@@ -81,7 +85,10 @@ int amdgpu_pasid_alloc(unsigned int bits)
void amdgpu_pasid_free(u32 pasid)
{
trace_amdgpu_pasid_freed(pasid);
ida_free(&amdgpu_pasid_ida, pasid);
spin_lock(&amdgpu_pasid_idr_lock);
idr_remove(&amdgpu_pasid_idr, pasid);
spin_unlock(&amdgpu_pasid_idr_lock);
}
static void amdgpu_pasid_free_cb(struct dma_fence *fence,
@@ -616,3 +623,15 @@ void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
}
}
}
/**
 * amdgpu_pasid_mgr_cleanup - cleanup PASID manager
 *
 * Destroy the global PASID IDR and release its internal bookkeeping
 * memory. Intended to be called once during driver teardown, after all
 * PASIDs have been freed.
 */
void amdgpu_pasid_mgr_cleanup(void)
{
	spin_lock(&amdgpu_pasid_idr_lock);
	idr_destroy(&amdgpu_pasid_idr);
	spin_unlock(&amdgpu_pasid_idr_lock);
}

View File

@@ -74,6 +74,7 @@ int amdgpu_pasid_alloc(unsigned int bits);
void amdgpu_pasid_free(u32 pasid);
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
u32 pasid);
void amdgpu_pasid_mgr_cleanup(void);
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);

View File

@@ -103,10 +103,8 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
if (!amdgpu_ctx_priority_is_valid(args->in.priority)) {
WARN(1, "Invalid context priority %d\n", args->in.priority);
if (!amdgpu_ctx_priority_is_valid(args->in.priority))
return -EINVAL;
}
switch (args->in.op) {
case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:

View File

@@ -999,15 +999,11 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
/* Resume all the queues for this process */
xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
queue = amdgpu_userq_get(uq_mgr, queue_id);
if (!queue)
continue;
if (!amdgpu_userq_buffer_vas_mapped(queue)) {
drm_file_err(uq_mgr->file,
"trying restore queue without va mapping\n");
queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
amdgpu_userq_put(queue);
continue;
}
@@ -1015,7 +1011,6 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
if (r)
ret = r;
amdgpu_userq_put(queue);
}
if (ret)
@@ -1232,10 +1227,8 @@ static void amdgpu_userq_restore_worker(struct work_struct *work)
}
ret = amdgpu_userq_restore_all(uq_mgr);
if (ret) {
if (ret)
drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
goto unlock;
}
unlock:
mutex_unlock(&uq_mgr->userq_mutex);
@@ -1252,13 +1245,9 @@ amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
amdgpu_userq_detect_and_reset_queues(uq_mgr);
/* Try to unmap all the queues in this process ctx */
xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
queue = amdgpu_userq_get(uq_mgr, queue_id);
if (!queue)
continue;
r = amdgpu_userq_preempt_helper(queue);
if (r)
ret = r;
amdgpu_userq_put(queue);
}
if (ret)
@@ -1291,31 +1280,25 @@ amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
int ret;
xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
queue = amdgpu_userq_get(uq_mgr, queue_id);
if (!queue)
continue;
struct dma_fence *f = queue->last_fence;
if (!f || dma_fence_is_signaled(f)) {
amdgpu_userq_put(queue);
if (!f || dma_fence_is_signaled(f))
continue;
}
ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
if (ret <= 0) {
drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
f->context, f->seqno);
amdgpu_userq_put(queue);
return -ETIMEDOUT;
}
amdgpu_userq_put(queue);
}
return 0;
}
void
amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr, bool schedule_resume)
amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_device *adev = uq_mgr->adev;
int ret;
@@ -1329,8 +1312,6 @@ amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr, bool schedule_resume)
if (ret)
dev_err(adev->dev, "Failed to evict userqueue\n");
if (schedule_resume)
schedule_delayed_work(&uq_mgr->resume_work, 0);
}
int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,

View File

@@ -133,8 +133,7 @@ int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_userq_obj *userq_obj);
void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
bool schedule_resume);
void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr);
void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
struct amdgpu_eviction_fence_mgr *evf_mgr);

View File

@@ -705,7 +705,7 @@ amdgpu_userq_wait_count_fences(struct drm_file *filp,
num_fences++;
}
wait_info->num_fences = num_fences;
wait_info->num_fences = min(num_fences, USHRT_MAX);
r = 0;
error_unlock:
@@ -714,6 +714,19 @@ amdgpu_userq_wait_count_fences(struct drm_file *filp,
return r;
}
/*
 * Append @fence to the caller-owned @fences array, taking a reference.
 *
 * If userspace did not allocate enough space (the array already holds
 * wait_info->num_fences entries), fall back to waiting for the fence
 * inline (interruptibly) instead of returning it.
 *
 * Returns 0 on success or a negative error code from the fallback wait.
 */
static int
amdgpu_userq_wait_add_fence(struct drm_amdgpu_userq_wait *wait_info,
			    struct dma_fence **fences, unsigned int *num_fences,
			    struct dma_fence *fence)
{
	/* As fallback shouldn't userspace allocate enough space */
	if (*num_fences >= wait_info->num_fences)
		return dma_fence_wait(fence, true);

	/* Reference is handed over to the fences array */
	fences[(*num_fences)++] = dma_fence_get(fence);
	return 0;
}
static int
amdgpu_userq_wait_return_fence_info(struct drm_file *filp,
struct drm_amdgpu_userq_wait *wait_info,
@@ -757,13 +770,12 @@ amdgpu_userq_wait_return_fence_info(struct drm_file *filp,
goto free_fences;
dma_fence_unwrap_for_each(f, &iter, fence) {
if (num_fences >= wait_info->num_fences) {
r = -EINVAL;
r = amdgpu_userq_wait_add_fence(wait_info, fences,
&num_fences, f);
if (r) {
dma_fence_put(fence);
goto free_fences;
}
fences[num_fences++] = dma_fence_get(f);
}
dma_fence_put(fence);
@@ -780,14 +792,12 @@ amdgpu_userq_wait_return_fence_info(struct drm_file *filp,
if (r)
goto free_fences;
if (num_fences >= wait_info->num_fences) {
dma_fence_put(fence);
r = -EINVAL;
r = amdgpu_userq_wait_add_fence(wait_info, fences,
&num_fences, fence);
dma_fence_put(fence);
if (r)
goto free_fences;
}
/* Give the reference to the fence array */
fences[num_fences++] = fence;
}
/* Lock all the GEM objects */
@@ -817,12 +827,10 @@ amdgpu_userq_wait_return_fence_info(struct drm_file *filp,
dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
DMA_RESV_USAGE_READ, fence) {
if (num_fences >= wait_info->num_fences) {
r = -EINVAL;
r = amdgpu_userq_wait_add_fence(wait_info, fences,
&num_fences, fence);
if (r)
goto error_unlock;
}
fences[num_fences++] = dma_fence_get(fence);
}
}
@@ -833,12 +841,10 @@ amdgpu_userq_wait_return_fence_info(struct drm_file *filp,
dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
DMA_RESV_USAGE_WRITE, fence) {
if (num_fences >= wait_info->num_fences) {
r = -EINVAL;
r = amdgpu_userq_wait_add_fence(wait_info, fences,
&num_fences, fence);
if (r)
goto error_unlock;
}
fences[num_fences++] = dma_fence_get(fence);
}
}
@@ -961,13 +967,13 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
}
num_read_bo_handles = wait_info->num_bo_read_handles;
ptr = u64_to_user_ptr(wait_info->bo_read_handles),
ptr = u64_to_user_ptr(wait_info->bo_read_handles);
r = drm_gem_objects_lookup(filp, ptr, num_read_bo_handles, &gobj_read);
if (r)
goto free_timeline_points;
num_write_bo_handles = wait_info->num_bo_write_handles;
ptr = u64_to_user_ptr(wait_info->bo_write_handles),
ptr = u64_to_user_ptr(wait_info->bo_write_handles);
r = drm_gem_objects_lookup(filp, ptr, num_write_bo_handles,
&gobj_write);
if (r)

View File

@@ -2916,6 +2916,7 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
xa_destroy(&adev->vm_manager.pasids);
amdgpu_vmid_mgr_fini(adev);
amdgpu_pasid_mgr_cleanup();
}
/**
@@ -2991,14 +2992,14 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
if (!root)
return false;
addr /= AMDGPU_GPU_PAGE_SIZE;
if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
node_id, addr, ts, write_fault)) {
node_id, addr >> PAGE_SHIFT, ts, write_fault)) {
amdgpu_bo_unref(&root);
return true;
}
addr /= AMDGPU_GPU_PAGE_SIZE;
r = amdgpu_bo_reserve(root, true);
if (r)
goto error_unref;

View File

@@ -6752,7 +6752,7 @@ static void gfx_v10_0_gfx_mqd_set_priority(struct amdgpu_device *adev,
/* set up default queue priority level
* 0x0 = low priority, 0x1 = high priority
*/
if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH)
if (prop->hqd_queue_priority == AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM)
priority = 1;
tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY);

View File

@@ -4088,7 +4088,7 @@ static void gfx_v11_0_gfx_mqd_set_priority(struct amdgpu_device *adev,
/* set up default queue priority level
* 0x0 = low priority, 0x1 = high priority
*/
if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH)
if (prop->hqd_queue_priority == AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM)
priority = 1;
tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT;

View File

@@ -2227,7 +2227,7 @@ static int mes_v12_1_self_test(struct amdgpu_device *adev, int xcc_id)
struct amdgpu_bo *meta_bo = NULL, *ctx_bo = NULL;
void *meta_ptr = NULL, *ctx_ptr = NULL;
u64 meta_gpu_addr, ctx_gpu_addr;
int size, i, r, pasid;;
int size, i, r, pasid;
pasid = amdgpu_pasid_alloc(16);
if (pasid < 0)

View File

@@ -134,6 +134,21 @@ static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
/*
 * Check whether the loaded PSP SOS firmware is new enough to support
 * VCN per-queue reset on this device.
 */
static bool vcn_v4_0_3_is_psp_fw_reset_supported(struct amdgpu_device *adev)
{
	const uint32_t sos_ver = adev->psp.sos.fw_version;
	/* Program ID lives in bits [15:8] of the SOS version word. */
	const uint32_t program = (sos_ver >> 8) & 0xFF;

	/*
	 * FWDEV-159155: program 0x01 needs SOS FW >= 0x0036015f before
	 * VCN per-queue reset may be enabled; other programs are fine.
	 */
	return program != 1 || sos_ver >= 0x0036015f;
}
static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -141,7 +156,9 @@ static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
if (amdgpu_dpm_reset_vcn_is_supported(adev) && !amdgpu_sriov_vf(adev))
if (amdgpu_dpm_reset_vcn_is_supported(adev) &&
vcn_v4_0_3_is_psp_fw_reset_supported(adev) &&
!amdgpu_sriov_vf(adev))
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
return 0;

View File

@@ -3170,11 +3170,11 @@ static int kfd_ioctl_create_process(struct file *filep, struct kfd_process *p, v
struct kfd_process *process;
int ret;
/* Each FD owns only one kfd_process */
if (p->context_id != KFD_CONTEXT_ID_PRIMARY)
if (!filep->private_data || !p)
return -EINVAL;
if (!filep->private_data || !p)
/* Each FD owns only one kfd_process */
if (p->context_id != KFD_CONTEXT_ID_PRIMARY)
return -EINVAL;
mutex_lock(&kfd_processes_mutex);

View File

@@ -3994,8 +3994,9 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
drm_edid_free(aconnector->drm_edid);
aconnector->drm_edid = NULL;
if (sink->dc_edid.length == 0) {
aconnector->drm_edid = NULL;
hdmi_cec_unset_edid(aconnector);
if (aconnector->dc_link->aux_mode) {
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
@@ -5417,7 +5418,7 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
caps = &dm->backlight_caps[aconnector->bl_idx];
/* Only offer ABM property when non-OLED and user didn't turn off by module parameter */
if (!caps->ext_caps->bits.oled && amdgpu_dm_abm_level < 0)
if (caps->ext_caps && !caps->ext_caps->bits.oled && amdgpu_dm_abm_level < 0)
drm_object_attach_property(&aconnector->base.base,
dm->adev->mode_info.abm_level_property,
ABM_SYSFS_CONTROL);
@@ -9885,7 +9886,7 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
}
/* Decrement skip count when SR is enabled and we're doing fast updates. */
if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
if (acrtc_state->update_type <= UPDATE_TYPE_FAST &&
(psr->psr_feature_enabled || pr->config.replay_supported)) {
if (aconn->sr_skip_count > 0)
aconn->sr_skip_count--;
@@ -9898,7 +9899,8 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
* a vblank event disable request to enable PSR/RP. PSR SU/RP
* can be enabled immediately once OS demonstrates an
* adequate number of fast atomic commits to notify KMD
* of update events. See `vblank_control_worker()`.
* of update events.
* See `amdgpu_dm_crtc_vblank_control_worker()`.
*/
if (!vrr_active &&
acrtc_attach->dm_irq_params.allow_sr_entry &&
@@ -10066,8 +10068,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
/*
* If the dirty regions changed, PSR-SU need to be disabled temporarily
* and enabled it again after dirty regions are stable to avoid video glitch.
* PSR-SU will be enabled in vblank_control_worker() if user pause the video
* during the PSR-SU was disabled.
* PSR-SU will be enabled in
* amdgpu_dm_crtc_vblank_control_worker() if user
* pause the video during the PSR-SU was disabled.
*/
if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
acrtc_attach->dm_irq_params.allow_sr_entry &&
@@ -10093,7 +10096,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* fast updates.
*/
if (crtc->state->async_flip &&
(acrtc_state->update_type != UPDATE_TYPE_FAST ||
(acrtc_state->update_type > UPDATE_TYPE_FAST ||
get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
drm_warn_once(state->dev,
"[PLANE:%d:%s] async flip with non-fast update\n",
@@ -10101,7 +10104,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->flip_addrs[planes_count].flip_immediate =
crtc->state->async_flip &&
acrtc_state->update_type == UPDATE_TYPE_FAST &&
acrtc_state->update_type <= UPDATE_TYPE_FAST &&
get_mem_type(old_plane_state->fb) == get_mem_type(fb);
timestamp_ns = ktime_get_ns();
@@ -12528,6 +12531,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
if (dc_resource_is_dsc_encoding_supported(dc)) {
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
dm_new_crtc_state->mode_changed_independent_from_dsc = new_crtc_state->mode_changed;
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
ret = add_affected_mst_dsc_crtcs(state, crtc);

View File

@@ -1005,6 +1005,7 @@ struct dm_crtc_state {
bool freesync_vrr_info_changed;
bool mode_changed_independent_from_dsc;
bool dsc_force_changed;
bool vrr_supported;
struct mod_freesync_config freesync_config;

View File

@@ -812,7 +812,6 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
unsigned long flags1;
bool forward_roi_change = false;
bool notify_ta = false;
bool all_crc_ready = true;
struct dc_stream_state *stream_state;
int i;
@@ -936,9 +935,6 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
continue;
}
if (!crtc_ctx->crc_info.crc[i].crc_ready)
all_crc_ready = false;
if (reset_crc_frame_count[i] || crtc_ctx->crc_info.crc[i].frame_count == UINT_MAX)
/* Reset the reference frame count after user update the ROI
* or it reaches the maximum value.
@@ -948,9 +944,6 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
crtc_ctx->crc_info.crc[i].frame_count += 1;
}
spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags1);
if (all_crc_ready)
complete_all(&crtc_ctx->crc_info.completion);
}
void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)

View File

@@ -70,7 +70,6 @@ struct crc_data {
struct crc_info {
struct crc_data crc[MAX_CRC_WINDOW_NUM];
struct completion completion;
spinlock_t lock;
};

View File

@@ -685,7 +685,7 @@ static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
* pitch, the DCC state, rotation, etc.
*/
if (crtc_state->async_flip &&
dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
dm_crtc_state->update_type > UPDATE_TYPE_FAST) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] async flips are only supported for fast updates\n",
crtc->base.id, crtc->name);

View File

@@ -1744,9 +1744,11 @@ int pre_validate_dsc(struct drm_atomic_state *state,
int ind = find_crtc_index_in_state_by_stream(state, stream);
if (ind >= 0) {
struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(state->crtcs[ind].new_state);
DRM_INFO_ONCE("%s:%d MST_DSC no mode changed for stream 0x%p\n",
__func__, __LINE__, stream);
state->crtcs[ind].new_state->mode_changed = 0;
dm_new_crtc_state->base.mode_changed = dm_new_crtc_state->mode_changed_independent_from_dsc;
}
}
}

View File

@@ -291,6 +291,11 @@ void dcn42_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_set_clock(safe_to_lower,
new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
/* Clamp the requested clock to PMFW based on DCN limit. */
if (dc->debug.min_deep_sleep_dcfclk_khz > 0 && clk_mgr_base->clks.dcfclk_deep_sleep_khz < dc->debug.min_deep_sleep_dcfclk_khz)
clk_mgr_base->clks.dcfclk_deep_sleep_khz = dc->debug.min_deep_sleep_dcfclk_khz;
dcn42_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
}
@@ -469,6 +474,9 @@ static void dcn42_dump_clk_registers(struct clk_state_registers_and_bypass *regs
DC_LOG_SMU("CLK1_CLK1_CURRENT_CNT,%d,dppclk\n",
internal.CLK8_CLK1_CURRENT_CNT);
DC_LOG_SMU("CLK1_CLK4_CURRENT_CNT,%d,dtbclk\n",
internal.CLK8_CLK4_CURRENT_CNT);
DC_LOG_SMU("CLK1_CLK3_BYPASS_CNTL,%d,dcfclk_bypass\n",
internal.CLK8_CLK3_BYPASS_CNTL);
@@ -569,7 +577,6 @@ void dcn42_init_clocks(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct clk_mgr_dcn42 *clk_mgr = TO_CLK_MGR_DCN42(clk_mgr_int);
struct dcn42_smu_dpm_clks smu_dpm_clks = { 0 };
DC_LOGGER_INIT(clk_mgr_base->ctx->logger);
(void)dc_logger;
@@ -587,131 +594,7 @@ void dcn42_init_clocks(struct clk_mgr *clk_mgr_base)
dcn42_dump_clk_registers(&clk_mgr_base->boot_snapshot, clk_mgr);
clk_mgr_base->clks.ref_dtbclk_khz = clk_mgr_base->boot_snapshot.dtbclk * 10;
if (clk_mgr_base->boot_snapshot.dtbclk > 59000) {
/*dtbclk enabled based on*/
clk_mgr_base->clks.dtbclk_en = true;
}
if (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels != 0) {
/*skip to get clock table and notify pmfw watermark range again*/
DC_LOG_SMU("skip to get dpm_clks from pmfw from resume and acr\n");
return;
}
smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn42 *)dm_helpers_allocate_gpu_mem(
clk_mgr_base->ctx,
DC_MEM_ALLOC_TYPE_GART,
sizeof(DpmClocks_t_dcn42),
&smu_dpm_clks.mc_address.quad_part);
ASSERT(smu_dpm_clks.dpm_clks);
if (clk_mgr_base->ctx->dc->debug.pstate_enabled && clk_mgr_int->smu_present && smu_dpm_clks.mc_address.quad_part != 0) {
int i;
DpmClocks_t_dcn42 *dpm_clks = smu_dpm_clks.dpm_clks;
dcn42_get_dpm_table_from_smu(clk_mgr_int, &smu_dpm_clks);
DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
"NumDispClkLevelsEnabled: %d\n"
"NumSocClkLevelsEnabled: %d\n"
"VcnClkLevelsEnabled: %d\n"
"FClkLevelsEnabled: %d\n"
"NumMemPstatesEnabled: %d\n"
"MinGfxClk: %d\n"
"MaxGfxClk: %d\n",
dpm_clks->NumDcfClkLevelsEnabled,
dpm_clks->NumDispClkLevelsEnabled,
dpm_clks->NumSocClkLevelsEnabled,
dpm_clks->VcnClkLevelsEnabled,
dpm_clks->NumFclkLevelsEnabled,
dpm_clks->NumMemPstatesEnabled,
dpm_clks->MinGfxClk,
dpm_clks->MaxGfxClk);
for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) {
DC_LOG_SMU("dpm_clks->DcfClocks[%d] = %d\n",
i,
dpm_clks->DcfClocks[i]);
}
for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++) {
DC_LOG_SMU("dpm_clks->DispClocks[%d] = %d\n",
i, dpm_clks->DispClocks[i]);
}
for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
DC_LOG_SMU("dpm_clks->SocClocks[%d] = %d\n",
i, dpm_clks->SocClocks[i]);
}
for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
DC_LOG_SMU("dpm_clks->FclkClocks_Freq[%d] = %d\n",
i, dpm_clks->FclkClocks_Freq[i]);
DC_LOG_SMU("dpm_clks->FclkClocks_Voltage[%d] = %d\n",
i, dpm_clks->FclkClocks_Voltage[i]);
}
for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++)
DC_LOG_SMU("dpm_clks->SocVoltage[%d] = %d\n",
i, dpm_clks->SocVoltage[i]);
for (i = 0; i < NUM_MEM_PSTATE_LEVELS; i++) {
DC_LOG_SMU("dpm_clks.MemPstateTable[%d].UClk = %d\n"
"dpm_clks->MemPstateTable[%d].MemClk= %d\n"
"dpm_clks->MemPstateTable[%d].Voltage = %d\n",
i, dpm_clks->MemPstateTable[i].UClk,
i, dpm_clks->MemPstateTable[i].MemClk,
i, dpm_clks->MemPstateTable[i].Voltage);
}
if (clk_mgr_base->ctx->dc_bios->integrated_info && clk_mgr_base->ctx->dc->config.use_default_clock_table == false) {
/* DCFCLK */
dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
dpm_clks->DcfClocks,
dpm_clks->NumDcfClkLevelsEnabled);
clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels = dpm_clks->NumDcfClkLevelsEnabled;
/* SOCCLK */
dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
dpm_clks->SocClocks,
dpm_clks->NumSocClkLevelsEnabled);
clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_socclk_levels = dpm_clks->NumSocClkLevelsEnabled;
/* DISPCLK */
dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
dpm_clks->DispClocks,
dpm_clks->NumDispClkLevelsEnabled);
clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels = dpm_clks->NumDispClkLevelsEnabled;
/* DPPCLK */
dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
dpm_clks->DppClocks,
dpm_clks->NumDispClkLevelsEnabled);
clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels = dpm_clks->NumDispClkLevelsEnabled;
/* FCLK */
dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
dpm_clks->FclkClocks_Freq,
NUM_FCLK_DPM_LEVELS);
clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels = dpm_clks->NumFclkLevelsEnabled;
clk_mgr_base->bw_params->clk_table.num_entries = dpm_clks->NumFclkLevelsEnabled;
/* Memory Pstate table is in reverse order*/
ASSERT(dpm_clks->NumMemPstatesEnabled <= NUM_MEM_PSTATE_LEVELS);
if (dpm_clks->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS)
dpm_clks->NumMemPstatesEnabled = NUM_MEM_PSTATE_LEVELS;
for (i = 0; i < dpm_clks->NumMemPstatesEnabled; i++) {
clk_mgr_base->bw_params->clk_table.entries[dpm_clks->NumMemPstatesEnabled - 1 - i].memclk_mhz = dpm_clks->MemPstateTable[i].UClk;
clk_mgr_base->bw_params->clk_table.entries[dpm_clks->NumMemPstatesEnabled - 1 - i].wck_ratio = dcn42_convert_wck_ratio(dpm_clks->MemPstateTable[i].WckRatio) ;
}
clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels = dpm_clks->NumMemPstatesEnabled;
/* DTBCLK*/
clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz = clk_mgr_base->clks.ref_dtbclk_khz / 1000;
clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels = 1;
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
clk_mgr_base->ctx->dc, clk_mgr_base->bw_params);
}
}
if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
dm_helpers_free_gpu_mem(clk_mgr_base->ctx, DC_MEM_ALLOC_TYPE_GART,
smu_dpm_clks.dpm_clks);
clk_mgr_base->clks.dtbclk_en = clk_mgr_base->boot_snapshot.dtbclk > 59000;
}
static struct clk_bw_params dcn42_bw_params = {
@@ -1071,6 +954,127 @@ bool dcn42_is_smu_present(struct clk_mgr *clk_mgr_base)
return clk_mgr->smu_present;
}
static void dcn42_get_smu_clocks(struct clk_mgr_internal *clk_mgr_int)
{
struct clk_mgr *clk_mgr_base = &clk_mgr_int->base;
struct dcn42_smu_dpm_clks smu_dpm_clks = { 0 };
DC_LOGGER_INIT(clk_mgr_base->ctx->logger);
(void)dc_logger;
smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn42 *)dm_helpers_allocate_gpu_mem(
clk_mgr_base->ctx,
DC_MEM_ALLOC_TYPE_GART,
sizeof(DpmClocks_t_dcn42),
&smu_dpm_clks.mc_address.quad_part);
ASSERT(smu_dpm_clks.dpm_clks);
if (clk_mgr_base->ctx->dc->debug.pstate_enabled && smu_dpm_clks.mc_address.quad_part != 0) {
int i;
DpmClocks_t_dcn42 *dpm_clks = smu_dpm_clks.dpm_clks;
dcn42_get_dpm_table_from_smu(clk_mgr_int, &smu_dpm_clks);
DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
"NumDispClkLevelsEnabled: %d\n"
"NumSocClkLevelsEnabled: %d\n"
"VcnClkLevelsEnabled: %d\n"
"FClkLevelsEnabled: %d\n"
"NumMemPstatesEnabled: %d\n"
"MinGfxClk: %d\n"
"MaxGfxClk: %d\n",
dpm_clks->NumDcfClkLevelsEnabled,
dpm_clks->NumDispClkLevelsEnabled,
dpm_clks->NumSocClkLevelsEnabled,
dpm_clks->VcnClkLevelsEnabled,
dpm_clks->NumFclkLevelsEnabled,
dpm_clks->NumMemPstatesEnabled,
dpm_clks->MinGfxClk,
dpm_clks->MaxGfxClk);
for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) {
DC_LOG_SMU("dpm_clks->DcfClocks[%d] = %d\n",
i,
dpm_clks->DcfClocks[i]);
}
for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++) {
DC_LOG_SMU("dpm_clks->DispClocks[%d] = %d\n",
i, dpm_clks->DispClocks[i]);
}
for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
DC_LOG_SMU("dpm_clks->SocClocks[%d] = %d\n",
i, dpm_clks->SocClocks[i]);
}
for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
DC_LOG_SMU("dpm_clks->FclkClocks_Freq[%d] = %d\n",
i, dpm_clks->FclkClocks_Freq[i]);
DC_LOG_SMU("dpm_clks->FclkClocks_Voltage[%d] = %d\n",
i, dpm_clks->FclkClocks_Voltage[i]);
}
for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++)
DC_LOG_SMU("dpm_clks->SocVoltage[%d] = %d\n",
i, dpm_clks->SocVoltage[i]);
for (i = 0; i < NUM_MEM_PSTATE_LEVELS; i++) {
DC_LOG_SMU("dpm_clks.MemPstateTable[%d].UClk = %d\n"
"dpm_clks->MemPstateTable[%d].MemClk= %d\n"
"dpm_clks->MemPstateTable[%d].Voltage = %d\n",
i, dpm_clks->MemPstateTable[i].UClk,
i, dpm_clks->MemPstateTable[i].MemClk,
i, dpm_clks->MemPstateTable[i].Voltage);
}
if (clk_mgr_base->ctx->dc_bios->integrated_info && clk_mgr_base->ctx->dc->config.use_default_clock_table == false) {
/* DCFCLK */
dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
dpm_clks->DcfClocks,
dpm_clks->NumDcfClkLevelsEnabled);
clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels = dpm_clks->NumDcfClkLevelsEnabled;
/* SOCCLK */
dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
dpm_clks->SocClocks,
dpm_clks->NumSocClkLevelsEnabled);
clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_socclk_levels = dpm_clks->NumSocClkLevelsEnabled;
/* DISPCLK */
dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
dpm_clks->DispClocks,
dpm_clks->NumDispClkLevelsEnabled);
clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels = dpm_clks->NumDispClkLevelsEnabled;
/* DPPCLK */
dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
dpm_clks->DppClocks,
dpm_clks->NumDispClkLevelsEnabled);
clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels = dpm_clks->NumDispClkLevelsEnabled;
/* FCLK */
dcn42_init_single_clock(&clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
dpm_clks->FclkClocks_Freq,
NUM_FCLK_DPM_LEVELS);
clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels = dpm_clks->NumFclkLevelsEnabled;
clk_mgr_base->bw_params->clk_table.num_entries = dpm_clks->NumFclkLevelsEnabled;
/* Memory Pstate table is in reverse order*/
ASSERT(dpm_clks->NumMemPstatesEnabled <= NUM_MEM_PSTATE_LEVELS);
if (dpm_clks->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS)
dpm_clks->NumMemPstatesEnabled = NUM_MEM_PSTATE_LEVELS;
for (i = 0; i < dpm_clks->NumMemPstatesEnabled; i++) {
clk_mgr_base->bw_params->clk_table.entries[dpm_clks->NumMemPstatesEnabled - 1 - i].memclk_mhz = dpm_clks->MemPstateTable[i].MemClk;
clk_mgr_base->bw_params->clk_table.entries[dpm_clks->NumMemPstatesEnabled - 1 - i].wck_ratio = dcn42_convert_wck_ratio(dpm_clks->MemPstateTable[i].WckRatio) ;
}
clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels = dpm_clks->NumMemPstatesEnabled;
/* DTBCLK*/
clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz = 600; /* Fixed on platform */
clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels = 1;
}
}
if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
dm_helpers_free_gpu_mem(clk_mgr_base->ctx, DC_MEM_ALLOC_TYPE_GART,
smu_dpm_clks.dpm_clks);
}
static struct clk_mgr_funcs dcn42_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
@@ -1139,6 +1143,11 @@ void dcn42_clk_mgr_construct(
dcn42_bw_params.num_channels = ctx->dc_bios->integrated_info->ma_channel_number ? ctx->dc_bios->integrated_info->ma_channel_number : 1;
clk_mgr->base.base.dprefclk_khz = dcn42_smu_get_dprefclk(&clk_mgr->base);
clk_mgr->base.base.clks.ref_dtbclk_khz = dcn42_smu_get_dtbclk(&clk_mgr->base);
clk_mgr->base.base.bw_params = &dcn42_bw_params;
if (clk_mgr->base.smu_present)
dcn42_get_smu_clocks(&clk_mgr->base);
}
/* in case we don't get a value from the BIOS, use default */
if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
@@ -1153,6 +1162,8 @@ void dcn42_clk_mgr_construct(
dcn42_read_ss_info_from_lut(&clk_mgr->base);
clk_mgr->base.base.bw_params = &dcn42_bw_params;
if (clk_mgr->base.smu_present)
dcn42_get_smu_clocks(&clk_mgr->base);
}
void dcn42_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)

View File

@@ -2695,7 +2695,7 @@ static bool is_surface_in_context(
static struct surface_update_descriptor get_plane_info_update_type(const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
struct surface_update_descriptor update_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE };
if (!u->plane_info)
return update_type;
@@ -2769,28 +2769,12 @@ static struct surface_update_descriptor get_plane_info_update_type(const struct
if (memcmp(tiling, &u->surface->tiling_info, sizeof(*tiling)) != 0) {
update_flags->bits.swizzle_change = 1;
elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
switch (tiling->gfxversion) {
case DcGfxVersion9:
case DcGfxVersion10:
case DcGfxVersion11:
if (tiling->gfx9.swizzle != DC_SW_LINEAR) {
update_flags->bits.bandwidth_change = 1;
elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
break;
case DcGfxAddr3:
if (tiling->gfx_addr3.swizzle != DC_ADDR3_SW_LINEAR) {
update_flags->bits.bandwidth_change = 1;
elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
break;
case DcGfxVersion7:
case DcGfxVersion8:
case DcGfxVersionUnknown:
default:
break;
if (tiling->flags.avoid_full_update_on_tiling_change) {
elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
} else {
update_flags->bits.bandwidth_change = 1;
elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
}
@@ -2803,7 +2787,7 @@ static struct surface_update_descriptor get_scaling_info_update_type(
const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
struct surface_update_descriptor update_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE };
if (!u->scaling_info)
return update_type;
@@ -2854,11 +2838,11 @@ static struct surface_update_descriptor get_scaling_info_update_type(
return update_type;
}
static struct surface_update_descriptor det_surface_update(
static struct surface_update_descriptor check_update_surface(
const struct dc_check_config *check_config,
struct dc_surface_update *u)
{
struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
struct surface_update_descriptor overall_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE };
union surface_update_flags *update_flags = &u->surface->update_flags;
if (u->surface->force_full_update) {
@@ -2878,7 +2862,7 @@ static struct surface_update_descriptor det_surface_update(
if (u->flip_addr) {
update_flags->bits.addr_update = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
elevate_update_type(&overall_type, UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_STREAM);
if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
update_flags->bits.tmz_changed = 1;
@@ -2892,27 +2876,43 @@ static struct surface_update_descriptor det_surface_update(
if (u->input_csc_color_matrix) {
update_flags->bits.input_csc_change = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
elevate_update_type(&overall_type,
check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
LOCK_DESCRIPTOR_STREAM);
}
if (u->cursor_csc_color_matrix) {
elevate_update_type(&overall_type,
check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
LOCK_DESCRIPTOR_STREAM);
}
if (u->coeff_reduction_factor) {
update_flags->bits.coeff_reduction_change = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
elevate_update_type(&overall_type,
check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
LOCK_DESCRIPTOR_STREAM);
}
if (u->gamut_remap_matrix) {
update_flags->bits.gamut_remap_change = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
elevate_update_type(&overall_type,
check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
LOCK_DESCRIPTOR_STREAM);
}
if (u->cm || (u->gamma && dce_use_lut(u->plane_info ? u->plane_info->format : u->surface->format))) {
update_flags->bits.gamma_change = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
elevate_update_type(&overall_type,
check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
LOCK_DESCRIPTOR_STREAM);
}
if (u->cm && (u->cm->flags.bits.lut3d_enable || u->surface->cm.flags.bits.lut3d_enable)) {
update_flags->bits.lut_3d = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
elevate_update_type(&overall_type,
check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
LOCK_DESCRIPTOR_STREAM);
}
if (u->cm && u->cm->flags.bits.lut3d_dma_enable != u->surface->cm.flags.bits.lut3d_dma_enable &&
@@ -2928,9 +2928,10 @@ static struct surface_update_descriptor det_surface_update(
if (u->hdr_mult.value)
if (u->hdr_mult.value != u->surface->hdr_mult.value) {
// TODO: Should be fast?
update_flags->bits.hdr_mult = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
elevate_update_type(&overall_type,
check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST,
LOCK_DESCRIPTOR_STREAM);
}
if (u->sdr_white_level_nits)
@@ -2984,7 +2985,7 @@ static struct surface_update_descriptor check_update_surfaces_for_stream(
int surface_count,
struct dc_stream_update *stream_update)
{
struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
struct surface_update_descriptor overall_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE };
/* When countdown finishes, promote this flip to full to trigger deferred final transition */
if (check_config->deferred_transition_state && !check_config->transition_countdown_to_steady_state) {
@@ -3051,7 +3052,18 @@ static struct surface_update_descriptor check_update_surfaces_for_stream(
if (su_flags->raw)
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
// Non-global cases
/* Non-global cases */
if (stream_update->hdr_static_metadata ||
stream_update->vrr_infopacket ||
stream_update->vsc_infopacket ||
stream_update->vsp_infopacket ||
stream_update->hfvsif_infopacket ||
stream_update->adaptive_sync_infopacket ||
stream_update->vtem_infopacket ||
stream_update->avi_infopacket) {
elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (stream_update->output_csc_transform) {
su_flags->bits.out_csc = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
@@ -3061,11 +3073,32 @@ static struct surface_update_descriptor check_update_surfaces_for_stream(
su_flags->bits.out_tf = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
if (stream_update->periodic_interrupt) {
elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (stream_update->dither_option) {
elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (stream_update->cursor_position || stream_update->cursor_attributes) {
elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
/* TODO - cleanup post blend CM */
if (stream_update->func_shaper || stream_update->lut3d_func) {
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
if (stream_update->pending_test_pattern) {
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
}
for (int i = 0 ; i < surface_count; i++) {
struct surface_update_descriptor inner_type =
det_surface_update(check_config, &updates[i]);
check_update_surface(check_config, &updates[i]);
elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
}
@@ -3092,6 +3125,81 @@ struct surface_update_descriptor dc_check_update_surfaces_for_stream(
return check_update_surfaces_for_stream(check_config, updates, surface_count, stream_update);
}
/*
* check_update_state_and_surfaces_for_stream() - Determine update type (fast, med, or full)
*
* This function performs checks on the DC global state, and is therefore not re-entrant. It
* should not be called from DM.
*
* See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
*/
static struct surface_update_descriptor check_update_state_and_surfaces_for_stream(
const struct dc *dc,
const struct dc_check_config *check_config,
const struct dc_stream_state *stream,
const struct dc_surface_update *updates,
const int surface_count,
const struct dc_stream_update *stream_update)
{
const struct dc_state *context = dc->current_state;
struct surface_update_descriptor overall_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE};
if (updates)
for (int i = 0; i < surface_count; i++)
if (!is_surface_in_context(context, updates[i].surface))
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
if (stream) {
const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream);
if (stream_status == NULL || stream_status->plane_count != surface_count)
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (dc->idle_optimizations_allowed)
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
if (dc_can_clear_cursor_limit(dc))
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
return overall_type;
}
/*
* dc_check_update_state_and_surfaces_for_stream() - Determine update type (fast, med, or full)
*
* This function performs checks on the DC global state, stream and surface update, and is
* therefore not re-entrant. It should not be called from DM.
*
* See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
*/
static struct surface_update_descriptor dc_check_update_state_and_surfaces_for_stream(
const struct dc *dc,
const struct dc_check_config *check_config,
struct dc_stream_state *stream,
struct dc_surface_update *updates,
int surface_count,
struct dc_stream_update *stream_update)
{
/* check updates against the entire DC state (global) first */
struct surface_update_descriptor overall_update_type = check_update_state_and_surfaces_for_stream(
dc,
check_config,
stream,
updates,
surface_count,
stream_update);
/* check updates for stream and plane */
struct surface_update_descriptor stream_update_type = dc_check_update_surfaces_for_stream(
check_config,
updates,
surface_count,
stream_update);
elevate_update_type(&overall_update_type, stream_update_type.update_type, stream_update_type.lock_descriptor);
return overall_update_type;
}
static struct dc_stream_status *stream_get_status(
struct dc_state *ctx,
struct dc_stream_state *stream)
@@ -3448,13 +3556,6 @@ static void update_seamless_boot_flags(struct dc *dc,
}
}
static bool full_update_required_weak(
const struct dc *dc,
const struct dc_surface_update *srf_updates,
int surface_count,
const struct dc_stream_update *stream_update,
const struct dc_stream_state *stream);
struct pipe_split_policy_backup {
bool dynamic_odm_policy;
bool subvp_policy;
@@ -3524,12 +3625,11 @@ static bool update_planes_and_stream_state(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
enum surface_update_type *new_update_type,
struct surface_update_descriptor *update_descriptor,
struct dc_state **new_context)
{
struct dc_state *context;
int i, j;
enum surface_update_type update_type;
const struct dc_stream_status *stream_status;
struct dc_context *dc_ctx = dc->ctx;
@@ -3543,17 +3643,20 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
context = dc->current_state;
update_type = dc_check_update_surfaces_for_stream(
&dc->check_config, srf_updates, surface_count, stream_update).update_type;
if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
update_type = UPDATE_TYPE_FULL;
*update_descriptor = dc_check_update_state_and_surfaces_for_stream(
dc,
&dc->check_config,
stream,
srf_updates,
surface_count,
stream_update);
/* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
* E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
* Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
*/
force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
if (update_type == UPDATE_TYPE_FULL)
if (update_descriptor->update_type == UPDATE_TYPE_FULL)
backup_planes_and_stream_state(&dc->scratch.current_state, stream);
/* update current stream with the new updates */
@@ -3579,7 +3682,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
}
if (update_type == UPDATE_TYPE_FULL) {
if (update_descriptor->update_type == UPDATE_TYPE_FULL) {
if (stream_update) {
uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
stream_update->stream->update_flags.raw = 0xFFFFFFFF;
@@ -3589,13 +3692,13 @@ static bool update_planes_and_stream_state(struct dc *dc,
srf_updates[i].surface->update_flags.raw = 0xFFFFFFFF;
}
if (update_type >= update_surface_trace_level)
if (update_descriptor->update_type >= update_surface_trace_level)
update_surface_trace(dc, srf_updates, surface_count);
for (i = 0; i < surface_count; i++)
copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);
if (update_type >= UPDATE_TYPE_FULL) {
if (update_descriptor->update_type >= UPDATE_TYPE_FULL) {
struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
for (i = 0; i < surface_count; i++)
@@ -3633,7 +3736,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *surface = srf_updates[i].surface;
if (update_type != UPDATE_TYPE_MED)
if (update_descriptor->update_type != UPDATE_TYPE_MED)
continue;
if (surface->update_flags.bits.position_change) {
for (j = 0; j < dc->res_pool->pipe_count; j++) {
@@ -3647,7 +3750,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
}
if (update_type == UPDATE_TYPE_FULL) {
if (update_descriptor->update_type == UPDATE_TYPE_FULL) {
struct pipe_split_policy_backup policy;
bool minimize = false;
@@ -3676,8 +3779,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
update_seamless_boot_flags(dc, context, surface_count, stream);
*new_context = context;
*new_update_type = update_type;
if (update_type == UPDATE_TYPE_FULL)
if (update_descriptor->update_type == UPDATE_TYPE_FULL)
backup_planes_and_stream_state(&dc->scratch.new_state, stream);
return true;
@@ -3757,7 +3859,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
program_cursor_position(dc, stream);
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
if (update_type <= UPDATE_TYPE_FAST)
continue;
if (stream_update->dsc_config)
@@ -4066,7 +4168,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
struct pipe_ctx *top_pipe_to_program = NULL;
struct dc_stream_status *stream_status = NULL;
bool should_offload_fams2_flip = false;
bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
bool should_lock_all_pipes = (update_type > UPDATE_TYPE_FAST);
if (should_lock_all_pipes)
determine_pipe_unlock_order(dc, context);
@@ -4126,7 +4228,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
continue;
pipe_ctx->plane_state->triplebuffer_flips = false;
if (update_type == UPDATE_TYPE_FAST &&
if (update_type <= UPDATE_TYPE_FAST &&
dc->hwss.program_triplebuffer != NULL &&
!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
/*triple buffer for VUpdate only*/
@@ -4183,7 +4285,7 @@ static void commit_planes_for_stream(struct dc *dc,
{
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
bool should_lock_all_pipes = (update_type > UPDATE_TYPE_FAST);
bool subvp_prev_use = false;
bool subvp_curr_use = false;
uint8_t current_stream_mask = 0;
@@ -4200,7 +4302,7 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FULL && dc->optimized_required)
hwss_process_outstanding_hw_updates(dc, dc->current_state);
if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
if (update_type > UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
dc->res_pool->funcs->prepare_mcache_programming(dc, context);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -4262,7 +4364,7 @@ static void commit_planes_for_stream(struct dc *dc,
odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
}
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if ((update_type > UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
if (should_use_dmub_inbox1_lock(dc, stream->link)) {
@@ -4333,7 +4435,7 @@ static void commit_planes_for_stream(struct dc *dc,
}
dc->hwss.post_unlock_program_front_end(dc, context);
if (update_type != UPDATE_TYPE_FAST)
if (update_type > UPDATE_TYPE_FAST)
if (dc->hwss.commit_subvp_config)
dc->hwss.commit_subvp_config(dc, context);
@@ -4349,7 +4451,7 @@ static void commit_planes_for_stream(struct dc *dc,
return;
}
if (update_type != UPDATE_TYPE_FAST) {
if (update_type > UPDATE_TYPE_FAST) {
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
@@ -4377,7 +4479,7 @@ static void commit_planes_for_stream(struct dc *dc,
if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
continue;
pipe_ctx->plane_state->triplebuffer_flips = false;
if (update_type == UPDATE_TYPE_FAST &&
if (update_type <= UPDATE_TYPE_FAST &&
dc->hwss.program_triplebuffer != NULL &&
!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
/*triple buffer for VUpdate only*/
@@ -4404,7 +4506,7 @@ static void commit_planes_for_stream(struct dc *dc,
continue;
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
if (update_type <= UPDATE_TYPE_FAST)
continue;
stream_status =
@@ -4423,7 +4525,7 @@ static void commit_planes_for_stream(struct dc *dc,
continue;
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
if (update_type <= UPDATE_TYPE_FAST)
continue;
ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
@@ -4434,7 +4536,7 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
if (dc->hwss.program_front_end_for_ctx && update_type > UPDATE_TYPE_FAST) {
dc->hwss.program_front_end_for_ctx(dc, context);
//Pipe busy until some frame and line #
@@ -4462,7 +4564,7 @@ static void commit_planes_for_stream(struct dc *dc,
}
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
if (update_type <= UPDATE_TYPE_FAST) {
if (dc->hwss.set_flip_control_gsl)
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -4499,7 +4601,7 @@ static void commit_planes_for_stream(struct dc *dc,
srf_updates[i].cm->flags.bits.lut3d_enable &&
srf_updates[i].cm->flags.bits.lut3d_dma_enable &&
dc->hwss.trigger_3dlut_dma_load)
dc->hwss.trigger_3dlut_dma_load(dc, pipe_ctx);
dc->hwss.trigger_3dlut_dma_load(pipe_ctx);
/*program triple buffer after lock based on flip type*/
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
@@ -4519,7 +4621,7 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
}
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if ((update_type > UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
@@ -4552,13 +4654,13 @@ static void commit_planes_for_stream(struct dc *dc,
/* If enabling subvp or transitioning from subvp->subvp, enable the
* phantom streams before we program front end for the phantom pipes.
*/
if (update_type != UPDATE_TYPE_FAST) {
if (update_type > UPDATE_TYPE_FAST) {
if (dc->hwss.enable_phantom_streams)
dc->hwss.enable_phantom_streams(dc, context);
}
}
if (update_type != UPDATE_TYPE_FAST)
if (update_type > UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
if (subvp_prev_use && !subvp_curr_use) {
@@ -4571,7 +4673,7 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.disable_phantom_streams(dc, context);
}
if (update_type != UPDATE_TYPE_FAST)
if (update_type > UPDATE_TYPE_FAST)
if (dc->hwss.commit_subvp_config)
dc->hwss.commit_subvp_config(dc, context);
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
@@ -5043,191 +5145,12 @@ static bool commit_minimal_transition_state(struct dc *dc,
return true;
}
void populate_fast_updates(struct dc_fast_update *fast_update,
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_update *stream_update)
{
int i = 0;
if (stream_update) {
fast_update[0].out_transfer_func = stream_update->out_transfer_func;
fast_update[0].output_csc_transform = stream_update->output_csc_transform;
} else {
fast_update[0].out_transfer_func = NULL;
fast_update[0].output_csc_transform = NULL;
}
for (i = 0; i < surface_count; i++) {
fast_update[i].flip_addr = srf_updates[i].flip_addr;
fast_update[i].gamma = srf_updates[i].gamma;
fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
fast_update[i].cursor_csc_color_matrix = srf_updates[i].cursor_csc_color_matrix;
fast_update[i].cm_hist_control = srf_updates[i].cm_hist_control;
}
}
static bool fast_updates_exist(const struct dc_fast_update *fast_update, int surface_count)
{
int i;
if (fast_update[0].out_transfer_func ||
fast_update[0].output_csc_transform)
return true;
for (i = 0; i < surface_count; i++) {
if (fast_update[i].flip_addr ||
fast_update[i].gamma ||
fast_update[i].gamut_remap_matrix ||
fast_update[i].input_csc_color_matrix ||
fast_update[i].cursor_csc_color_matrix ||
fast_update[i].cm_hist_control ||
fast_update[i].coeff_reduction_factor)
return true;
}
return false;
}
bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count)
{
int i;
if (fast_update[0].out_transfer_func ||
fast_update[0].output_csc_transform)
return true;
for (i = 0; i < surface_count; i++) {
if (fast_update[i].input_csc_color_matrix ||
fast_update[i].gamma ||
fast_update[i].gamut_remap_matrix ||
fast_update[i].coeff_reduction_factor ||
fast_update[i].cm_hist_control ||
fast_update[i].cursor_csc_color_matrix)
return true;
}
return false;
}
static bool full_update_required_weak(
const struct dc *dc,
const struct dc_surface_update *srf_updates,
int surface_count,
const struct dc_stream_update *stream_update,
const struct dc_stream_state *stream)
{
const struct dc_state *context = dc->current_state;
if (srf_updates)
for (int i = 0; i < surface_count; i++)
if (!is_surface_in_context(context, srf_updates[i].surface))
return true;
if (stream) {
const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream);
if (stream_status == NULL || stream_status->plane_count != surface_count)
return true;
}
if (dc->idle_optimizations_allowed)
return true;
if (dc_can_clear_cursor_limit(dc))
return true;
return false;
}
static bool full_update_required(
const struct dc *dc,
const struct dc_surface_update *srf_updates,
int surface_count,
const struct dc_stream_update *stream_update,
const struct dc_stream_state *stream)
{
const union dc_plane_cm_flags blend_only_flags = {
.bits = {
.blend_enable = 1,
}
};
if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
return true;
for (int i = 0; i < surface_count; i++) {
if (srf_updates &&
(srf_updates[i].plane_info ||
srf_updates[i].scaling_info ||
(srf_updates[i].hdr_mult.value &&
srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) ||
(srf_updates[i].sdr_white_level_nits &&
srf_updates[i].sdr_white_level_nits != srf_updates->surface->sdr_white_level_nits) ||
srf_updates[i].in_transfer_func ||
srf_updates[i].surface->force_full_update ||
(srf_updates[i].flip_addr &&
srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
(srf_updates[i].cm &&
((srf_updates[i].cm->flags.all != blend_only_flags.all && srf_updates[i].cm->flags.all != 0) ||
(srf_updates[i].surface->cm.flags.all != blend_only_flags.all && srf_updates[i].surface->cm.flags.all != 0)))))
return true;
}
if (stream_update &&
(((stream_update->src.height != 0 && stream_update->src.width != 0) ||
(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
stream_update->integer_scaling_update) ||
stream_update->hdr_static_metadata ||
stream_update->abm_level ||
stream_update->periodic_interrupt ||
stream_update->vrr_infopacket ||
stream_update->vsc_infopacket ||
stream_update->vsp_infopacket ||
stream_update->hfvsif_infopacket ||
stream_update->vtem_infopacket ||
stream_update->adaptive_sync_infopacket ||
stream_update->avi_infopacket ||
stream_update->dpms_off ||
stream_update->allow_freesync ||
stream_update->vrr_active_variable ||
stream_update->vrr_active_fixed ||
stream_update->gamut_remap ||
stream_update->output_color_space ||
stream_update->dither_option ||
stream_update->wb_update ||
stream_update->dsc_config ||
stream_update->mst_bw_update ||
stream_update->func_shaper ||
stream_update->lut3d_func ||
stream_update->pending_test_pattern ||
stream_update->crtc_timing_adjust ||
stream_update->scaler_sharpener_update ||
stream_update->hw_cursor_req))
return true;
return false;
}
static bool fast_update_only(
const struct dc *dc,
const struct dc_fast_update *fast_update,
const struct dc_surface_update *srf_updates,
int surface_count,
const struct dc_stream_update *stream_update,
const struct dc_stream_state *stream)
{
return fast_updates_exist(fast_update, surface_count)
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
static bool update_planes_and_stream_v2(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update)
{
struct dc_state *context;
enum surface_update_type update_type;
struct dc_fast_update fast_update[MAX_SURFACES] = {0};
/* In cases where MPO and split or ODM are used transitions can
* cause underflow. Apply stream configuration with minimal pipe
@@ -5235,11 +5158,9 @@ static bool update_planes_and_stream_v2(struct dc *dc,
*/
bool force_minimal_pipe_splitting = 0;
bool is_plane_addition = 0;
bool is_fast_update_only;
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
surface_count, stream_update, stream);
struct surface_update_descriptor update_descriptor = {0};
force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
dc,
stream,
@@ -5258,7 +5179,7 @@ static bool update_planes_and_stream_v2(struct dc *dc,
surface_count,
stream,
stream_update,
&update_type,
&update_descriptor,
&context))
return false;
@@ -5268,7 +5189,7 @@ static bool update_planes_and_stream_v2(struct dc *dc,
dc_state_release(context);
return false;
}
update_type = UPDATE_TYPE_FULL;
elevate_update_type(&update_descriptor, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (dc->hwss.is_pipe_topology_transition_seamless &&
@@ -5277,13 +5198,13 @@ static bool update_planes_and_stream_v2(struct dc *dc,
commit_minimal_transition_state_in_dc_update(dc, context, stream,
srf_updates, surface_count);
if (is_fast_update_only && !dc->check_config.enable_legacy_fast_update) {
if (update_descriptor.update_type <= UPDATE_TYPE_FAST) {
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
stream,
stream_update,
update_type,
update_descriptor.update_type,
context);
} else {
if (!stream_update &&
@@ -5299,7 +5220,7 @@ static bool update_planes_and_stream_v2(struct dc *dc,
surface_count,
stream,
stream_update,
update_type,
update_descriptor.update_type,
context);
}
if (dc->current_state != context)
@@ -5313,14 +5234,8 @@ static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
struct dc_stream_update *stream_update,
enum surface_update_type update_type)
{
struct dc_fast_update fast_update[MAX_SURFACES] = {0};
ASSERT(update_type < UPDATE_TYPE_FULL);
populate_fast_updates(fast_update, srf_updates, surface_count,
stream_update);
if (fast_update_only(dc, fast_update, srf_updates, surface_count,
stream_update, stream) &&
!dc->check_config.enable_legacy_fast_update)
if (update_type <= UPDATE_TYPE_FAST)
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -5411,7 +5326,7 @@ static bool update_planes_and_stream_v3(struct dc *dc,
struct dc_stream_update *stream_update)
{
struct dc_state *new_context;
enum surface_update_type update_type;
struct surface_update_descriptor update_descriptor = {0};
/*
* When this function returns true and new_context is not equal to
@@ -5423,22 +5338,26 @@ static bool update_planes_and_stream_v3(struct dc *dc,
* replaced by a newer context. Refer to the use of
* swap_and_free_current_context below.
*/
if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
stream, stream_update, &update_type,
if (!update_planes_and_stream_state(dc,
srf_updates,
surface_count,
stream,
stream_update,
&update_descriptor,
&new_context))
return false;
if (new_context == dc->current_state) {
commit_planes_and_stream_update_on_current_context(dc,
srf_updates, surface_count, stream,
stream_update, update_type);
stream_update, update_descriptor.update_type);
if (dc->check_config.transition_countdown_to_steady_state)
dc->check_config.transition_countdown_to_steady_state--;
} else {
commit_planes_and_stream_update_with_new_context(dc,
srf_updates, surface_count, stream,
stream_update, update_type, new_context);
stream_update, update_descriptor.update_type, new_context);
}
return true;
@@ -7212,7 +7131,7 @@ struct dc_update_scratch_space {
struct dc_stream_update *stream_update;
bool update_v3;
bool do_clear_update_flags;
enum surface_update_type update_type;
struct surface_update_descriptor update_descriptor;
struct dc_state *new_context;
enum update_v3_flow flow;
struct dc_state *backup_context;
@@ -7295,45 +7214,28 @@ static bool update_planes_and_stream_prepare_v3(
ASSERT(scratch->flow == UPDATE_V3_FLOW_INVALID);
dc_exit_ips_for_hw_access(scratch->dc);
/* HWSS path determination needs to be done prior to updating the surface and stream states. */
struct dc_fast_update fast_update[MAX_SURFACES] = { 0 };
populate_fast_updates(fast_update,
scratch->surface_updates,
scratch->surface_count,
scratch->stream_update);
const bool is_hwss_fast_path_only =
fast_update_only(scratch->dc,
fast_update,
scratch->surface_updates,
scratch->surface_count,
scratch->stream_update,
scratch->stream) &&
!scratch->dc->check_config.enable_legacy_fast_update;
if (!update_planes_and_stream_state(
scratch->dc,
scratch->surface_updates,
scratch->surface_count,
scratch->stream,
scratch->stream_update,
&scratch->update_type,
&scratch->update_descriptor,
&scratch->new_context
)) {
return false;
}
if (scratch->new_context == scratch->dc->current_state) {
ASSERT(scratch->update_type < UPDATE_TYPE_FULL);
ASSERT(scratch->update_descriptor.update_type < UPDATE_TYPE_FULL);
scratch->flow = is_hwss_fast_path_only
scratch->flow = scratch->update_descriptor.update_type <= UPDATE_TYPE_FAST
? UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST
: UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL;
return true;
}
ASSERT(scratch->update_type >= UPDATE_TYPE_FULL);
ASSERT(scratch->update_descriptor.update_type >= UPDATE_TYPE_FULL);
const bool seamless = scratch->dc->hwss.is_pipe_topology_transition_seamless(
scratch->dc,
@@ -7406,7 +7308,7 @@ static void update_planes_and_stream_execute_v3_commit(
intermediate_update ? scratch->intermediate_count : scratch->surface_count,
scratch->stream,
use_stream_update ? scratch->stream_update : NULL,
intermediate_context ? UPDATE_TYPE_FULL : scratch->update_type,
intermediate_context ? UPDATE_TYPE_FULL : scratch->update_descriptor.update_type,
// `dc->current_state` only used in `NO_NEW_CONTEXT`, where it is equal to `new_context`
intermediate_context ? scratch->intermediate_context : scratch->new_context
);
@@ -7424,7 +7326,7 @@ static void update_planes_and_stream_execute_v3(
scratch->surface_count,
scratch->stream,
scratch->stream_update,
scratch->update_type,
scratch->update_descriptor.update_type,
scratch->new_context
);
break;

View File

@@ -749,10 +749,10 @@ struct clock_source *resource_find_used_clk_src_for_sharing(
return NULL;
}
static enum pixel_format convert_pixel_format_to_dalsurface(
static enum dc_pixel_format convert_pixel_format_to_dalsurface(
enum surface_pixel_format surface_pixel_format)
{
enum pixel_format dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
enum dc_pixel_format dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
switch (surface_pixel_format) {
case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:

View File

@@ -33,7 +33,6 @@
#include "dc_dmub_srv.h"
#include "dc_state_priv.h"
#include "dc_stream_priv.h"
#include "dce/dmub_hw_lock_mgr.h"
#define DC_LOGGER dc->ctx->logger
#ifndef MIN
@@ -259,7 +258,6 @@ void program_cursor_attributes(
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
bool unlock_dmub = false;
if (!stream)
return;
@@ -278,12 +276,6 @@ void program_cursor_attributes(
if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) {
dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
} else {
if (dc->hwss.dmub_hw_control_lock && pipe_ctx->stream &&
should_use_dmub_inbox0_lock_for_link(dc, pipe_ctx->stream->link)) {
dc->hwss.dmub_hw_control_lock(dc, dc->current_state, true);
unlock_dmub = true;
}
dc->hwss.cursor_lock(dc, pipe_to_program, true);
if (pipe_to_program->next_odm_pipe)
dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true);
@@ -306,9 +298,6 @@ void program_cursor_attributes(
dc->hwss.cursor_lock(dc, pipe_to_program, false);
if (pipe_to_program->next_odm_pipe)
dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false);
if (unlock_dmub)
dc->hwss.dmub_hw_control_lock(dc, dc->current_state, false);
}
}
}
@@ -416,7 +405,6 @@ void program_cursor_position(
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
bool unlock_dmub = false;
if (!stream)
return;
@@ -436,16 +424,10 @@ void program_cursor_position(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) {
if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update)
dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
} else {
if (dc->hwss.dmub_hw_control_lock && pipe_ctx->stream &&
should_use_dmub_inbox0_lock_for_link(dc, pipe_ctx->stream->link)) {
dc->hwss.dmub_hw_control_lock(dc, dc->current_state, true);
unlock_dmub = true;
}
else
dc->hwss.cursor_lock(dc, pipe_to_program, true);
}
}
dc->hwss.set_cursor_position(pipe_ctx);
@@ -457,14 +439,10 @@ void program_cursor_position(
}
if (pipe_to_program) {
if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) {
if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update)
dc->hwss.commit_cursor_offload_update(dc, pipe_to_program);
} else {
else
dc->hwss.cursor_lock(dc, pipe_to_program, false);
if (unlock_dmub)
dc->hwss.dmub_hw_control_lock(dc, dc->current_state, false);
}
}
}

View File

@@ -63,7 +63,7 @@ struct dcn_dsc_reg_state;
struct dcn_optc_reg_state;
struct dcn_dccg_reg_state;
#define DC_VER "3.2.374"
#define DC_VER "3.2.375"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -467,6 +467,7 @@ struct dc_static_screen_params {
*/
enum surface_update_type {
UPDATE_TYPE_ADDR_ONLY, /* only surface address is being updated, no other programming needed */
UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
UPDATE_TYPE_MED, /* ISR safe, most of programming needed, no bw/clk change*/
UPDATE_TYPE_FULL, /* may need to shuffle resources */
@@ -562,6 +563,7 @@ struct dc_config {
bool frame_update_cmd_version2;
struct spl_sharpness_range dcn_sharpness_range;
struct spl_sharpness_range dcn_override_sharpness_range;
bool no_native422_support;
};
enum visual_confirm {
@@ -986,7 +988,6 @@ struct link_service;
* causing an issue or not.
*/
struct dc_debug_options {
bool native422_support;
bool disable_dsc;
enum visual_confirm visual_confirm;
int visual_confirm_rect_height;
@@ -1215,6 +1216,7 @@ struct dc_debug_options {
bool enable_dmu_recovery;
unsigned int force_vmin_threshold;
bool enable_otg_frame_sync_pwa;
unsigned int min_deep_sleep_dcfclk_khz;
};
@@ -1879,18 +1881,6 @@ struct dc_scaling_info {
struct scaling_taps scaling_quality;
};
struct dc_fast_update {
const struct dc_flip_addrs *flip_addr;
const struct dc_gamma *gamma;
const struct colorspace_transform *gamut_remap_matrix;
const struct dc_csc_transform *input_csc_color_matrix;
const struct fixed31_32 *coeff_reduction_factor;
struct dc_transfer_func *out_transfer_func;
struct dc_csc_transform *output_csc_transform;
const struct dc_csc_transform *cursor_csc_color_matrix;
struct cm_hist_control *cm_hist_control;
};
struct dc_surface_update {
struct dc_plane_state *surface;
@@ -2029,12 +2019,7 @@ bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count);
void populate_fast_updates(struct dc_fast_update *fast_update,
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_update *stream_update);
/*
/*
* Set up streams and links associated to drive sinks
* The streams parameter is an absolute set of all active streams.
*

View File

@@ -52,6 +52,7 @@ struct dc_dsc_policy {
uint32_t max_target_bpp;
uint32_t min_target_bpp;
bool enable_dsc_when_not_needed;
bool ycbcr422_simple;
};
struct dc_dsc_config_options {

View File

@@ -218,7 +218,7 @@ enum surface_pixel_format {
/* Pixel format */
enum pixel_format {
enum dc_pixel_format {
/*graph*/
PIXEL_FORMAT_UNINITIALIZED,
PIXEL_FORMAT_INDEX8,
@@ -445,6 +445,10 @@ enum dc_gfxversion {
enum swizzle_mode_addr3_values swizzle;
} gfx_addr3;/*gfx with addr3 and above*/
};
struct {
bool avoid_full_update_on_tiling_change;
} flags;
};
/* Rotation angle */

View File

@@ -63,7 +63,8 @@ static void populate_inits_from_splinits(struct scl_inits *inits,
inits->h_c = dc_fixpt_from_int_dy(spl_inits->h_filter_init_int_c, spl_inits->h_filter_init_frac_c >> 5, 0, 19);
inits->v_c = dc_fixpt_from_int_dy(spl_inits->v_filter_init_int_c, spl_inits->v_filter_init_frac_c >> 5, 0, 19);
}
static void populate_splformat_from_format(enum spl_pixel_format *spl_pixel_format, const enum pixel_format pixel_format)
static void populate_splformat_from_format(enum spl_pixel_format *spl_pixel_format,
const enum dc_pixel_format pixel_format)
{
if (pixel_format < PIXEL_FORMAT_INVALID)
*spl_pixel_format = (enum spl_pixel_format)pixel_format;

View File

@@ -1144,10 +1144,12 @@ union replay_low_refresh_rate_enable_options {
union replay_optimization {
struct {
//BIT[0-3]: Replay Teams Optimization
//BIT[0-1]: Replay Teams Optimization
unsigned int TEAMS_OPTIMIZATION_VER_1 :1;
unsigned int TEAMS_OPTIMIZATION_VER_2 :1;
unsigned int RESERVED_2_3 :2;
//BIT[2]: Replay Live Capture with CVT
unsigned int LIVE_CAPTURE_WITH_CVT :1;
unsigned int RESERVED_3 :1;
} bits;
unsigned int raw;
@@ -1196,6 +1198,8 @@ struct replay_config {
bool frame_skip_supported;
/* Replay Received Frame Skipping Error HPD. */
bool received_frame_skipping_error_hpd;
/* Live capture with CVT is activated */
bool live_capture_with_cvt_activated;
};
/* Replay feature flags*/

View File

@@ -122,6 +122,7 @@
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GLOBAL_FGCG_REP_CNTL, DCCG_GLOBAL_FGCG_REP_DIS, mask_sh),\
DCCG_SFII(OTG, PIXEL_RATE_CNTL, DP_DTO, ENABLE, 0, mask_sh),\
DCCG_SFII(OTG, PIXEL_RATE_CNTL, DP_DTO, ENABLE, 1, mask_sh),\
DCCG_SFII(OTG, PIXEL_RATE_CNTL, DP_DTO, ENABLE, 2, mask_sh),\

View File

@@ -122,6 +122,33 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
.program_hpd_filter = dce110_program_hpd_filter,
};
static const struct link_encoder_funcs dce110_lnk_enc_funcs_no_hpd = {
.validate_output_with_stream =
dce110_link_encoder_validate_output_with_stream,
.hw_init = dce110_link_encoder_hw_init,
.setup = dce110_link_encoder_setup,
.enable_tmds_output = dce110_link_encoder_enable_tmds_output,
.enable_dp_output = dce110_link_encoder_enable_dp_output,
.enable_dp_mst_output = dce110_link_encoder_enable_dp_mst_output,
.enable_lvds_output = dce110_link_encoder_enable_lvds_output,
.enable_analog_output = dce110_link_encoder_enable_analog_output,
.disable_output = dce110_link_encoder_disable_output,
.dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings,
.dp_set_phy_pattern = dce110_link_encoder_dp_set_phy_pattern,
.update_mst_stream_allocation_table =
dce110_link_encoder_update_mst_stream_allocation_table,
.psr_program_dp_dphy_fast_training =
dce110_psr_program_dp_dphy_fast_training,
.psr_program_secondary_packet = dce110_psr_program_secondary_packet,
.connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe,
.is_dig_enabled = dce110_is_dig_enabled,
.destroy = dce110_link_encoder_destroy,
.get_max_link_cap = dce110_link_encoder_get_max_link_cap,
.get_dig_frontend = dce110_get_dig_frontend,
.get_hpd_state = dce110_get_hpd_state,
.program_hpd_filter = dce110_program_hpd_filter,
};
static enum bp_result link_transmitter_control(
struct dce110_link_encoder *enc110,
struct bp_transmitter_control *cntl)
@@ -865,7 +892,10 @@ void dce110_link_encoder_construct(
const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
enum bp_result result = BP_RESULT_OK;
enc110->base.funcs = &dce110_lnk_enc_funcs;
if (hpd_regs)
enc110->base.funcs = &dce110_lnk_enc_funcs;
else
enc110->base.funcs = &dce110_lnk_enc_funcs_no_hpd;
enc110->base.ctx = init_data->ctx;
enc110->base.id = init_data->encoder;
enc110->base.analog_id = init_data->analog_encoder;
@@ -1855,6 +1885,33 @@ static const struct link_encoder_funcs dce60_lnk_enc_funcs = {
.program_hpd_filter = dce110_program_hpd_filter,
};
static const struct link_encoder_funcs dce60_lnk_enc_funcs_no_hpd = {
.validate_output_with_stream =
dce110_link_encoder_validate_output_with_stream,
.hw_init = dce110_link_encoder_hw_init,
.setup = dce110_link_encoder_setup,
.enable_tmds_output = dce110_link_encoder_enable_tmds_output,
.enable_dp_output = dce60_link_encoder_enable_dp_output,
.enable_dp_mst_output = dce60_link_encoder_enable_dp_mst_output,
.enable_lvds_output = dce110_link_encoder_enable_lvds_output,
.enable_analog_output = dce110_link_encoder_enable_analog_output,
.disable_output = dce110_link_encoder_disable_output,
.dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings,
.dp_set_phy_pattern = dce60_link_encoder_dp_set_phy_pattern,
.update_mst_stream_allocation_table =
dce110_link_encoder_update_mst_stream_allocation_table,
.psr_program_dp_dphy_fast_training =
dce110_psr_program_dp_dphy_fast_training,
.psr_program_secondary_packet = dce110_psr_program_secondary_packet,
.connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe,
.is_dig_enabled = dce110_is_dig_enabled,
.destroy = dce110_link_encoder_destroy,
.get_max_link_cap = dce110_link_encoder_get_max_link_cap,
.get_dig_frontend = dce110_get_dig_frontend,
.get_hpd_state = dce110_get_hpd_state,
.program_hpd_filter = dce110_program_hpd_filter,
};
void dce60_link_encoder_construct(
struct dce110_link_encoder *enc110,
const struct encoder_init_data *init_data,
@@ -1867,7 +1924,10 @@ void dce60_link_encoder_construct(
const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
enum bp_result result = BP_RESULT_OK;
enc110->base.funcs = &dce60_lnk_enc_funcs;
if (hpd_regs)
enc110->base.funcs = &dce60_lnk_enc_funcs;
else
enc110->base.funcs = &dce60_lnk_enc_funcs_no_hpd;
enc110->base.ctx = init_data->ctx;
enc110->base.id = init_data->encoder;
enc110->base.analog_id = init_data->analog_encoder;

View File

@@ -202,7 +202,7 @@ static unsigned int dml_round_to_multiple(unsigned int num, unsigned int multipl
return (num - remainder);
}
static unsigned int dml_get_num_active_pipes(int unsigned num_planes, const struct core_display_cfg_support_info *cfg_support_info)
static unsigned int dml_get_num_active_pipes(unsigned int num_planes, const struct core_display_cfg_support_info *cfg_support_info)
{
unsigned int num_active_pipes = 0;
@@ -546,9 +546,9 @@ static bool dml_is_vertical_rotation(enum dml2_rotation_angle Scan)
return is_vert;
}
static int unsigned dml_get_gfx_version(enum dml2_swizzle_mode sw_mode)
static unsigned int dml_get_gfx_version(enum dml2_swizzle_mode sw_mode)
{
int unsigned version = 0;
unsigned int version = 0;
if (sw_mode == dml2_sw_linear ||
sw_mode == dml2_sw_256b_2d ||
@@ -1761,7 +1761,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->PixelPTEBytesPerRow = (unsigned int)((double)*p->dpte_row_width_ub / (double)*p->PixelPTEReqWidth * *p->PTERequestSize);
// VBA_DELTA, VBA doesn't have programming value for pte row height linear.
*p->dpte_row_height_linear = (unsigned int)1 << (unsigned int)math_floor2(math_log((float)(p->PTEBufferSizeInRequests * PixelPTEReqWidth_linear / p->Pitch), 2.0), 1);
*p->dpte_row_height_linear = 1U << (unsigned int)math_floor2(math_log((float)(p->PTEBufferSizeInRequests * PixelPTEReqWidth_linear / p->Pitch), 2.0), 1);
if (*p->dpte_row_height_linear > 128)
*p->dpte_row_height_linear = 128;
@@ -3377,7 +3377,7 @@ static void calculate_cursor_req_attributes(
DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_line = %d\n", __func__, *cursor_bytes_per_line);
DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, *cursor_bytes_per_chunk);
DML_LOG_VERBOSE("DML::%s: cursor_bytes = %d\n", __func__, *cursor_bytes);
DML_LOG_VERBOSE("DML::%s: cursor_pitch = %d\n", __func__, cursor_bpp == 2 ? 256 : (unsigned int)1 << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1));
DML_LOG_VERBOSE("DML::%s: cursor_pitch = %d\n", __func__, cursor_bpp == 2 ? 256 : 1U << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1));
#endif
}
@@ -12205,15 +12205,15 @@ static void rq_dlg_get_wm_regs(const struct dml2_display_cfg *display_cfg, const
{
double refclk_freq_in_mhz = (display_cfg->overrides.hw.dlg_ref_clk_mhz > 0) ? (double)display_cfg->overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;
wm_regs->fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
wm_regs->sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
wm_regs->sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
wm_regs->sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
wm_regs->sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
wm_regs->temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
wm_regs->uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
wm_regs->urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
wm_regs->usr = (int unsigned)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
wm_regs->fclk_pstate = (unsigned int)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
wm_regs->sr_enter = (unsigned int)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
wm_regs->sr_exit = (unsigned int)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
wm_regs->sr_enter_z8 = (unsigned int)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
wm_regs->sr_exit_z8 = (unsigned int)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
wm_regs->temp_read_or_ppt = (unsigned int)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
wm_regs->uclk_pstate = (unsigned int)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
wm_regs->urgent = (unsigned int)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
wm_regs->usr = (unsigned int)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
wm_regs->refcyc_per_trip_to_mem = (unsigned int)(mode_lib->mp.UrgentLatency * refclk_freq_in_mhz);
wm_regs->refcyc_per_meta_trip_to_mem = (unsigned int)(mode_lib->mp.MetaTripToMemory * refclk_freq_in_mhz);
wm_regs->frac_urg_bw_flip = (unsigned int)(mode_lib->mp.FractionOfUrgentBandwidthImmediateFlip * 1000);
@@ -12692,7 +12692,7 @@ static void rq_dlg_get_dlg_reg(
disp_dlg_regs->refcyc_per_vm_req_flip = (unsigned int)(math_pow(2, 23) - 1);
DML_ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
DML_ASSERT(disp_dlg_regs->dst_y_after_scaler < 8U);
DML_ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)math_pow(2, 13));
if (disp_dlg_regs->dst_y_per_pte_row_nom_l >= (unsigned int)math_pow(2, 17)) {
@@ -13248,7 +13248,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.misc.cstate_max_cap_mode = dml_get_cstate_max_cap_mode(mode_lib);
out->min_clocks.dcn4x.dpprefclk_khz = (int unsigned)dml_get_global_dppclk_khz(mode_lib);
out->min_clocks.dcn4x.dpprefclk_khz = (unsigned int)dml_get_global_dppclk_khz(mode_lib);
out->informative.qos.max_active_fclk_change_latency_supported = dml_get_fclk_change_latency(mode_lib);

View File

@@ -143,7 +143,7 @@ static unsigned int find_pipes_assigned_to_plane(struct dml2_context *ctx,
{
int i;
unsigned int num_found = 0;
unsigned int plane_id_assigned_to_pipe = -1;
unsigned int plane_id_assigned_to_pipe = UINT_MAX;
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];

View File

@@ -1174,12 +1174,12 @@ static unsigned int map_plane_to_dml_display_cfg(const struct dml2_context *dml2
const struct dc_state *context, const struct dml_display_cfg_st *dml_dispcfg, unsigned int stream_id, int plane_index)
{
unsigned int plane_id;
int i = 0;
int location = -1;
unsigned int i = 0;
unsigned int location = UINT_MAX;
if (!get_plane_id(context->bw_ctx.dml2, context, plane, stream_id, plane_index, &plane_id)) {
ASSERT(false);
return -1;
return UINT_MAX;
}
for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {

View File

@@ -102,7 +102,7 @@ static int dpp1_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
}
}
static bool dpp1_dscl_is_video_format(enum pixel_format format)
static bool dpp1_dscl_is_video_format(enum dc_pixel_format format)
{
if (format >= PIXEL_FORMAT_VIDEO_BEGIN
&& format <= PIXEL_FORMAT_VIDEO_END)
@@ -111,7 +111,7 @@ static bool dpp1_dscl_is_video_format(enum pixel_format format)
return false;
}
static bool dpp1_dscl_is_420_format(enum pixel_format format)
static bool dpp1_dscl_is_420_format(enum dc_pixel_format format)
{
if (format == PIXEL_FORMAT_420BPP8 ||
format == PIXEL_FORMAT_420BPP10)

View File

@@ -94,7 +94,7 @@ static int dpp401_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
}
}
static bool dpp401_dscl_is_video_format(enum pixel_format format)
static bool dpp401_dscl_is_video_format(enum dc_pixel_format format)
{
if (format >= PIXEL_FORMAT_VIDEO_BEGIN
&& format <= PIXEL_FORMAT_VIDEO_END)
@@ -103,7 +103,7 @@ static bool dpp401_dscl_is_video_format(enum pixel_format format)
return false;
}
static bool dpp401_dscl_is_420_format(enum pixel_format format)
static bool dpp401_dscl_is_420_format(enum dc_pixel_format format)
{
if (format == PIXEL_FORMAT_420BPP8 ||
format == PIXEL_FORMAT_420BPP10)

View File

@@ -680,9 +680,6 @@ static void get_dsc_enc_caps(
} else {
build_dsc_enc_caps(dsc, dsc_enc_caps);
}
if (dsc->ctx->dc->debug.native422_support)
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
}
/* Returns 'false' if no intersection was found for at least one capability.
@@ -1100,13 +1097,14 @@ static bool setup_dsc_config(
branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_0_mps;
break;
case PIXEL_ENCODING_YCBCR422:
is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_NATIVE_422;
sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_1_mps;
branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_1_mps;
if (!is_dsc_possible) {
if (policy.ycbcr422_simple) {
is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_SIMPLE_422;
dsc_cfg->ycbcr422_simple = is_dsc_possible;
sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_0_mps;
} else {
is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_NATIVE_422;
sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_1_mps;
branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_1_mps;
}
break;
case PIXEL_ENCODING_YCBCR420:
@@ -1406,6 +1404,7 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
policy->min_target_bpp = 8;
/* DP specs limits to 3 x bpc */
policy->max_target_bpp = 3 * bpc;
policy->ycbcr422_simple = true;
break;
case PIXEL_ENCODING_YCBCR420:
/* DP specs limits to 6 */

View File

@@ -100,7 +100,7 @@ void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;

View File

@@ -128,7 +128,7 @@ void dsc35_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int m
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;

View File

@@ -78,7 +78,7 @@ static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsign
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;

View File

@@ -6,6 +6,7 @@
#include "dcn31/dcn31_hubbub.h"
#include "dcn32/dcn32_hubbub.h"
#include "dcn35/dcn35_hubbub.h"
#include "dcn401/dcn401_hubbub.h"
#include "dcn42/dcn42_hubbub.h"
#include "dm_services.h"
#include "reg_helper.h"
@@ -429,15 +430,6 @@ static void hubbub42_allow_self_refresh_control(struct hubbub *hubbub, bool allo
REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, !allow);
if (!allow && hubbub->ctx->dc->debug.disable_stutter) {/*controlled by registry key*/
REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_VALUE, 0,
DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_ENABLE, 1);
REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
}
}
static void hubbub42_set_sdp_control(struct hubbub *hubbub, bool dc_control)
{
@@ -494,6 +486,46 @@ static bool hubbub42_program_watermarks(
return wm_pending;
}
static void hubbub42_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
uint32_t request_limit = 96; //MAX(12 * memory_channel_count, 96);
REG_UPDATE(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, request_limit);
}
static bool dcn42_program_arbiter(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs,
bool safe_to_lower)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
bool wm_pending = false;
uint32_t temp;
/* request backpressure and outstanding return threshold (unused)*/
//REG_UPDATE(DCHUBBUB_TIMEOUT_DETECTION_CTRL1, DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD, arb_regs->req_stall_threshold);
/* 401 delta: do not update P-State stall threshold (handled by fw) */
// REG_UPDATE(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD, arb_regs->pstate_stall_threshold);
if (safe_to_lower || arb_regs->allow_sdpif_rate_limit_when_cstate_req > hubbub2->allow_sdpif_rate_limit_when_cstate_req) {
hubbub2->allow_sdpif_rate_limit_when_cstate_req = arb_regs->allow_sdpif_rate_limit_when_cstate_req;
/* only update the required bits */
REG_GET(DCHUBBUB_CTRL_STATUS, DCHUBBUB_HW_DEBUG, &temp);
if (hubbub2->allow_sdpif_rate_limit_when_cstate_req) {
temp |= (1 << 5);
} else {
temp &= ~(1 << 5);
}
REG_UPDATE(DCHUBBUB_CTRL_STATUS, DCHUBBUB_HW_DEBUG, temp);
} else {
wm_pending = true;
}
return wm_pending;
}
static const struct hubbub_funcs hubbub42_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx,
@@ -509,13 +541,16 @@ static const struct hubbub_funcs hubbub42_funcs = {
.force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes,
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub35_init_watermarks,
.program_det_size = dcn32_program_det_size,
.program_compbuf_size = dcn35_program_compbuf_size,
.init_crb = dcn35_init_crb,
.init_crb = dcn401_init_crb,
.dchvm_init = dcn35_dchvm_init,
.hubbub_read_state = hubbub2_read_state,
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.dchubbub_init = hubbub35_init,
.dchvm_init = dcn35_dchvm_init,
.set_request_limit = hubbub42_set_request_limit,
.program_det_segments = dcn401_program_det_segments,
.program_compbuf_segments = dcn401_program_compbuf_segments,
.wait_for_det_update = dcn401_wait_for_det_update,
.program_arbiter = dcn42_program_arbiter,
.hubbub_read_reg_state = hubbub3_read_reg_state
};
void hubbub42_construct(struct dcn20_hubbub *hubbub2,

View File

@@ -245,6 +245,39 @@ static void hubp42_program_deadline(
REFCYC_PER_VM_DMDATA, dlg_attr->refcyc_per_vm_dmdata);
}
void hubp42_program_requestor(
struct hubp *hubp,
struct dml2_display_rq_regs *rq_regs)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(HUBPRET_CONTROL,
DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address);
REG_SET_4(DCN_EXPANSION_MODE, 0,
DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode,
PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode,
MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
REG_SET_8(DCHUBP_REQ_SIZE_CONFIG, 0,
CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size,
MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size,
DPTE_GROUP_SIZE, rq_regs->rq_regs_l.dpte_group_size,
VM_GROUP_SIZE, rq_regs->rq_regs_l.mpte_group_size,
SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height,
PTE_ROW_HEIGHT_LINEAR, rq_regs->rq_regs_l.pte_row_height_linear);
REG_SET_7(DCHUBP_REQ_SIZE_CONFIG_C, 0,
CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size,
MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size,
META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size,
MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size,
DPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.dpte_group_size,
SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height,
PTE_ROW_HEIGHT_LINEAR_C, rq_regs->rq_regs_c.pte_row_height_linear);
}
void hubp42_setup(
struct hubp *hubp,
struct dml2_dchub_per_pipe_register_set *pipe_regs,
@@ -255,7 +288,7 @@ void hubp42_setup(
* disable the requestors is not needed
*/
hubp401_vready_at_or_After_vsync(hubp, pipe_global_sync, timing);
hubp401_program_requestor(hubp, &pipe_regs->rq_regs);
hubp42_program_requestor(hubp, &pipe_regs->rq_regs);
hubp42_program_deadline(hubp, &pipe_regs->dlg_regs, &pipe_regs->ttu_regs);
}
static void hubp42_program_surface_config(

View File

@@ -48,6 +48,8 @@
HUBP_SF(CURSOR0_0_HUBP_3DLUT_ADDRESS_LOW, HUBP_3DLUT_ADDRESS_LOW, mask_sh),\
HUBP_SF(CURSOR0_0_HUBP_3DLUT_DLG_PARAM, REFCYC_PER_3DLUT_GROUP, mask_sh)
struct dml2_display_rq_regs;
bool hubp42_construct(
struct dcn20_hubp *hubp2,
struct dc_context *ctx,
@@ -64,6 +66,10 @@ void hubp42_program_3dlut_fl_config(struct hubp *hubp,
void hubp42_read_state(struct hubp *hubp);
void hubp42_program_requestor(
struct hubp *hubp,
struct dml2_display_rq_regs *rq_regs);
void hubp42_setup(
struct hubp *hubp,
struct dml2_dchub_per_pipe_register_set *pipe_regs,

View File

@@ -757,6 +757,9 @@ static void dcn32_initialize_min_clocks(struct dc *dc)
{
struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
if (!dc->clk_mgr || !dc->clk_mgr->bw_params || !dc->clk_mgr->funcs)
return;
clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ;
clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000;
clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
@@ -765,9 +768,10 @@ static void dcn32_initialize_min_clocks(struct dc *dc)
clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
clocks->fclk_p_state_change_support = true;
clocks->p_state_change_support = true;
if (dc->debug.disable_boot_optimizations) {
clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
} else {
} else if (dc->clk_mgr->funcs->get_dispclk_from_dentist) {
/* Even though DPG_EN = 1 for the connected display, it still requires the
* correct timing so we cannot set DISPCLK to min freq or it could cause
* audio corruption. Read current DISPCLK from DENTIST and request the same
@@ -776,10 +780,10 @@ static void dcn32_initialize_min_clocks(struct dc *dc)
clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
}
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
dc->current_state,
true);
if (dc->clk_mgr->funcs->update_clocks)
dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
dc->current_state,
true);
}
void dcn32_init_hw(struct dc *dc)
@@ -1007,7 +1011,8 @@ void dcn32_init_hw(struct dc *dc)
DMUB_FW_VERSION(7, 0, 35)) {
/* FAMS2 is disabled */
dc->debug.fams2_config.bits.enable = false;
if (dc->debug.using_dml2 && dc->res_pool->funcs->update_bw_bounding_box) {
if (dc->debug.using_dml2 && dc->res_pool->funcs->update_bw_bounding_box &&
dc->clk_mgr && dc->clk_mgr->bw_params) {
/* update bounding box if FAMS2 disabled */
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
}

View File

@@ -369,12 +369,14 @@ void dcn401_init_hw(struct dc *dc)
}
}
void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx)
void dcn401_trigger_3dlut_dma_load(struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
const struct pipe_ctx *primary_dpp_pipe_ctx = resource_get_primary_dpp_pipe(pipe_ctx);
struct hubp *primary_hubp = primary_dpp_pipe_ctx ?
primary_dpp_pipe_ctx->plane_res.hubp : NULL;
if (hubp->funcs->hubp_enable_3dlut_fl) {
hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
if (primary_hubp && primary_hubp->funcs->hubp_enable_3dlut_fl) {
primary_hubp->funcs->hubp_enable_3dlut_fl(primary_hubp, true);
}
}
@@ -382,8 +384,11 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state)
{
struct dc *dc = pipe_ctx->plane_res.hubp->ctx->dc;
const struct pipe_ctx *primary_dpp_pipe_ctx = resource_get_primary_dpp_pipe(pipe_ctx);
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct hubp *primary_hubp = primary_dpp_pipe_ctx ?
primary_dpp_pipe_ctx->plane_res.hubp : NULL;
const struct dc_plane_cm *cm = &plane_state->cm;
int mpcc_id = hubp->inst;
struct mpc *mpc = dc->res_pool->mpc;
@@ -481,25 +486,41 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, 12, mpcc_id);
if (mpc->funcs->update_3dlut_fast_load_select)
mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, primary_hubp->inst);
/* HUBP */
if (hubp->funcs->hubp_program_3dlut_fl_config)
hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cm->lut3d_dma);
if (primary_hubp->inst == hubp->inst) {
/* only program if this is the primary dpp pipe for the given plane */
if (hubp->funcs->hubp_program_3dlut_fl_config)
hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cm->lut3d_dma);
if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, cm->lut3d_dma.format);
if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, cm->lut3d_dma.format);
if (hubp->funcs->hubp_program_3dlut_fl_addr)
hubp->funcs->hubp_program_3dlut_fl_addr(hubp, &cm->lut3d_dma.addr);
if (hubp->funcs->hubp_program_3dlut_fl_addr)
hubp->funcs->hubp_program_3dlut_fl_addr(hubp, &cm->lut3d_dma.addr);
if (hubp->funcs->hubp_enable_3dlut_fl) {
hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
if (hubp->funcs->hubp_enable_3dlut_fl) {
hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
} else {
/* GPU memory only supports fast load path */
BREAK_TO_DEBUGGER();
lut_enable = false;
result = false;
}
} else {
/* GPU memory only supports fast load path */
BREAK_TO_DEBUGGER();
lut_enable = false;
result = false;
/* re-trigger primary HUBP to load 3DLUT */
if (primary_hubp->funcs->hubp_enable_3dlut_fl) {
primary_hubp->funcs->hubp_enable_3dlut_fl(primary_hubp, true);
}
/* clear FL setup on this pipe's HUBP */
memset(&lut3d_dma, 0, sizeof(lut3d_dma));
if (hubp->funcs->hubp_program_3dlut_fl_config)
hubp->funcs->hubp_program_3dlut_fl_config(hubp, &lut3d_dma);
if (hubp->funcs->hubp_enable_3dlut_fl)
hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
}
} else {
/* Legacy (Host) Load Mode */
@@ -1809,42 +1830,41 @@ void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx)
* This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless
* of whether OTG lock is currently being held or not.
*/
struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL };
struct pipe_ctx *odm_pipe, *mpc_pipe;
int i, wa_pipe_ct = 0;
const struct pipe_ctx *otg_master_pipe_ctx = resource_get_otg_master(pipe_ctx);
struct timing_generator *tg = otg_master_pipe_ctx ?
otg_master_pipe_ctx->stream_res.tg : NULL;
const struct pipe_ctx *primary_dpp_pipe_ctx = resource_is_pipe_type(pipe_ctx, DPP_PIPE) ?
resource_get_primary_dpp_pipe(pipe_ctx) : pipe_ctx;
struct hubp *primary_hubp = primary_dpp_pipe_ctx ?
primary_dpp_pipe_ctx->plane_res.hubp : NULL;
for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) {
for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) {
if (mpc_pipe->plane_state &&
mpc_pipe->plane_state->cm.flags.bits.lut3d_enable &&
mpc_pipe->plane_state->cm.flags.bits.lut3d_dma_enable) {
wa_pipes[wa_pipe_ct++] = mpc_pipe;
}
}
if (!otg_master_pipe_ctx && !tg) {
return;
}
if (wa_pipe_ct > 0) {
if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true);
if (primary_dpp_pipe_ctx &&
primary_dpp_pipe_ctx->plane_state &&
primary_dpp_pipe_ctx->plane_state->cm.flags.bits.lut3d_enable &&
primary_dpp_pipe_ctx->plane_state->cm.flags.bits.lut3d_dma_enable) {
if (tg->funcs->set_vupdate_keepout)
tg->funcs->set_vupdate_keepout(tg, true);
for (i = 0; i < wa_pipe_ct; ++i) {
if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
if (primary_hubp->funcs->hubp_enable_3dlut_fl) {
primary_hubp->funcs->hubp_enable_3dlut_fl(primary_hubp, true);
}
pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status)
pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false);
tg->funcs->unlock(tg);
if (tg->funcs->wait_update_lock_status)
tg->funcs->wait_update_lock_status(tg, false);
for (i = 0; i < wa_pipe_ct; ++i) {
if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
if (primary_hubp->funcs->hubp_enable_3dlut_fl) {
primary_hubp->funcs->hubp_enable_3dlut_fl(primary_hubp, true);
}
if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false);
if (tg->funcs->set_vupdate_keepout)
tg->funcs->set_vupdate_keepout(tg, false);
} else {
pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
tg->funcs->unlock(tg);
}
}

View File

@@ -41,8 +41,7 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
bool dcn401_set_output_transfer_func(struct dc *dc,
struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream);
void dcn401_trigger_3dlut_dma_load(struct dc *dc,
struct pipe_ctx *pipe_ctx);
void dcn401_trigger_3dlut_dma_load(struct pipe_ctx *pipe_ctx);
void dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx *pipe_ctx,
unsigned int *tmds_div);
enum dc_status dcn401_enable_stream_timing(

View File

@@ -69,6 +69,7 @@ void dcn42_init_hw(struct dc *dc)
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
uint32_t user_level = MAX_BACKLIGHT_LEVEL;
bool dchub_ref_freq_changed;
int current_dchub_ref_freq = 0;
if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) {
@@ -203,7 +204,8 @@ void dcn42_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
if (link->link_enc->funcs->is_dig_enabled &&
if (link && link->link_enc &&
link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
hws->funcs.power_down) {
hws->funcs.power_down(dc);
@@ -260,8 +262,12 @@ void dcn42_init_hw(struct dc *dc)
if (dc->res_pool->hubbub->funcs->init_crb)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->clk_mgr->bw_params->num_channels, dc->config.sdpif_request_limit_words_per_umc);
if (dc->res_pool->hubbub->funcs->set_request_limit &&
dc->clk_mgr && dc->clk_mgr->bw_params &&
dc->config.sdpif_request_limit_words_per_umc > 0)
dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub,
dc->clk_mgr->bw_params->num_channels,
dc->config.sdpif_request_limit_words_per_umc);
// Get DMCUB capabilities
if (dc->ctx->dmub_srv) {
@@ -269,13 +275,18 @@ void dcn42_init_hw(struct dc *dc)
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
/* sw and fw FAMS versions must match for support */
dc->debug.fams2_config.bits.enable &=
dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; // sw & fw fams versions must match for support
if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box)
|| res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver;
dchub_ref_freq_changed =
res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq;
if ((!dc->debug.fams2_config.bits.enable || dchub_ref_freq_changed) &&
dc->res_pool->funcs->update_bw_bounding_box &&
dc->clk_mgr && dc->clk_mgr->bw_params) {
/* update bounding box if FAMS2 disabled, or if dchub clk has changed */
if (dc->clk_mgr)
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
}
}
if (dc->res_pool->pg_cntl) {

View File

@@ -1120,7 +1120,7 @@ struct hw_sequencer_funcs {
void (*program_output_csc)(struct dc *dc, struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
uint16_t *matrix, int opp_id);
void (*trigger_3dlut_dma_load)(struct dc *dc, struct pipe_ctx *pipe_ctx);
void (*trigger_3dlut_dma_load)(struct pipe_ctx *pipe_ctx);
/* VM Related */
int (*init_sys_ctx)(struct dce_hwseq *hws,

View File

@@ -256,7 +256,7 @@ struct default_adjustment {
enum dc_color_space out_color_space;
enum dc_color_space in_color_space;
enum dc_color_depth color_depth;
enum pixel_format surface_pixel_format;
enum dc_pixel_format surface_pixel_format;
enum graphics_csc_adjust_type csc_adjust_type;
bool force_hw_default;
};

View File

@@ -160,7 +160,7 @@ struct scaler_data {
struct scaling_ratios ratios;
struct scl_inits inits;
struct sharpness_adj sharpness;
enum pixel_format format;
enum dc_pixel_format format;
struct line_buffer_params lb_params;
// Below struct holds the scaler values to program hw registers
struct dscl_prog_data dscl_prog_data;

View File

@@ -781,7 +781,6 @@ static void restore_phy_clocks_for_destructive_link_verification(const struct dc
}
static void verify_link_capability_destructive(struct dc_link *link,
struct dc_sink *sink,
enum dc_detect_reason reason)
{
bool should_prepare_phy_clocks =
@@ -855,11 +854,11 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
return destrictive;
}
static void verify_link_capability(struct dc_link *link, struct dc_sink *sink,
static void verify_link_capability(struct dc_link *link,
enum dc_detect_reason reason)
{
if (should_verify_link_capability_destructively(link, reason))
verify_link_capability_destructive(link, sink, reason);
verify_link_capability_destructive(link, reason);
else
verify_link_capability_non_destructive(link);
}
@@ -1453,8 +1452,9 @@ bool link_detect(struct dc_link *link, enum dc_detect_reason reason)
is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
if (is_local_sink_detect_success && link->local_sink)
verify_link_capability(link, link->local_sink, reason);
if (is_local_sink_detect_success && link->local_sink) {
verify_link_capability(link, reason);
}
DC_LOG_DC("%s: link_index=%d is_local_sink_detect_success=%d pre_link_type=%d link_type=%d\n", __func__,
link->link_index, is_local_sink_detect_success, pre_link_type, link->type);

View File

@@ -181,7 +181,8 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
/* link can be also enabled by vbios. In this case it is not recorded
* in pipe_ctx. Disable link phy here to make sure it is completely off
*/
dp_disable_link_phy(link, &link_res, link->connector_signal);
if (dc_is_dp_signal(link->connector_signal))
dp_disable_link_phy(link, &link_res, link->connector_signal);
}
void link_resume(struct dc_link *link)

View File

@@ -176,11 +176,12 @@ void pg_cntl42_hubp_dpp_pg_control(struct pg_cntl *pg_cntl, unsigned int hubp_dp
uint32_t pwr_status = power_on ? 0 : 2;
uint32_t org_ip_request_cntl;
bool block_enabled;
bool skip_pg = pg_cntl->ctx->dc->debug.ignore_pg ||
pg_cntl->ctx->dc->debug.disable_hubp_power_gate ||
pg_cntl->ctx->dc->debug.disable_dpp_power_gate ||
pg_cntl->ctx->dc->idle_optimizations_allowed;
if (pg_cntl->ctx->dc->debug.ignore_pg ||
pg_cntl->ctx->dc->debug.disable_hubp_power_gate ||
pg_cntl->ctx->dc->debug.disable_dpp_power_gate ||
pg_cntl->ctx->dc->idle_optimizations_allowed)
if (skip_pg && !power_on)
return;
block_enabled = pg_cntl42_hubp_dpp_pg_status(pg_cntl, hubp_dpp_inst);

View File

@@ -650,9 +650,6 @@ static struct link_encoder *dce100_link_encoder_create(
return &enc110->base;
}
if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
@@ -661,7 +658,8 @@ static struct link_encoder *dce100_link_encoder_create(
&link_enc_feature,
&link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs) ?
NULL : &link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
}

View File

@@ -671,7 +671,7 @@ static struct link_encoder *dce110_link_encoder_create(
kzalloc_obj(struct dce110_link_encoder);
int link_regs_id;
if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
if (!enc110)
return NULL;
link_regs_id =
@@ -682,7 +682,8 @@ static struct link_encoder *dce110_link_encoder_create(
&link_enc_feature,
&link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs) ?
NULL : &link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
}

View File

@@ -632,7 +632,7 @@ static struct link_encoder *dce112_link_encoder_create(
kzalloc_obj(struct dce110_link_encoder);
int link_regs_id;
if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
if (!enc110)
return NULL;
link_regs_id =
@@ -643,7 +643,8 @@ static struct link_encoder *dce112_link_encoder_create(
&link_enc_feature,
&link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs) ?
NULL : &link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
}

View File

@@ -716,7 +716,7 @@ static struct link_encoder *dce120_link_encoder_create(
kzalloc_obj(struct dce110_link_encoder);
int link_regs_id;
if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
if (!enc110)
return NULL;
link_regs_id =
@@ -727,7 +727,8 @@ static struct link_encoder *dce120_link_encoder_create(
&link_enc_feature,
&link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs) ?
NULL : &link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
}

View File

@@ -746,18 +746,16 @@ static struct link_encoder *dce60_link_encoder_create(
return &enc110->base;
}
if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
dce60_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
&link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
enc_init_data,
&link_enc_feature,
&link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs) ?
NULL : &link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
}

View File

@@ -752,9 +752,6 @@ static struct link_encoder *dce80_link_encoder_create(
return &enc110->base;
}
if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
@@ -763,7 +760,8 @@ static struct link_encoder *dce80_link_encoder_create(
&link_enc_feature,
&link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs) ?
NULL : &link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
}

View File

@@ -1963,6 +1963,8 @@ static bool dcn31_resource_construct(
dc->config.use_pipe_ctx_sync_logic = true;
dc->config.disable_hbr_audio_dp2 = true;
dc->config.no_native422_support = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {

View File

@@ -1925,6 +1925,8 @@ static bool dcn315_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
dc->config.no_native422_support = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {

View File

@@ -1790,7 +1790,7 @@ static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
}
}
static int dcn401_get_power_profile(const struct dc_state *context)
int dcn401_get_power_profile(const struct dc_state *context)
{
int uclk_mhz = context->bw_ctx.bw.dcn.clk.dramclk_khz / 1000;
int dpm_level = 0;

View File

@@ -32,6 +32,7 @@ void dcn401_get_default_tiling_info(struct dc_tiling_info *tiling_info);
unsigned int dcn401_get_vstartup_for_pipe(struct pipe_ctx *pipe_ctx);
int dcn401_get_power_profile(const struct dc_state *context);
/* Following are definitions for run time init of reg offsets */
/* HUBP */

View File

@@ -760,6 +760,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.ignore_pg = true,
.disable_stutter_for_wm_program = true,
.min_deep_sleep_dcfclk_khz = 8000,
};
static const struct dc_check_config config_defaults = {
@@ -1790,8 +1791,10 @@ static struct resource_funcs dcn42_res_pool_funcs = {
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
.prepare_mcache_programming = dcn42_prepare_mcache_programming,
.build_pipe_pix_clk_params = dcn42_build_pipe_pix_clk_params,
.get_power_profile = dcn401_get_power_profile,
.get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe,
.get_max_hw_cursor_size = dcn42_get_max_hw_cursor_size,
.get_default_tiling_info = dcn10_get_default_tiling_info
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -2015,7 +2018,7 @@ static bool dcn42_resource_construct(
dc->config.dcn_override_sharpness_range.hdr_rgb_mid = 1500;
dc->config.use_pipe_ctx_sync_logic = true;
dc->config.dc_mode_clk_limit_support = true;
dc->config.dc_mode_clk_limit_support = false;
dc->config.enable_windowed_mpo_odm = true;
/* Use psp mailbox to enable assr */
dc->config.use_assr_psp_message = true;

View File

@@ -481,6 +481,8 @@
SRI_ARR(OPTC_INPUT_CLOCK_CONTROL, ODM, inst), \
SRI_ARR(OPTC_DATA_SOURCE_SELECT, ODM, inst), \
SRI_ARR(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst), \
SRI_ARR(OPTC_RSMU_UNDERFLOW, ODM, inst), \
SRI_ARR(OPTC_UNDERFLOW_THRESHOLD, ODM, inst), \
SRI_ARR(CONTROL, VTG, inst), \
SRI_ARR(OTG_VERT_SYNC_CONTROL, OTG, inst), \
SRI_ARR(OTG_GSL_CONTROL, OTG, inst), \
@@ -584,5 +586,6 @@ enum dc_status dcn42_validate_bandwidth(struct dc *dc,
enum dc_validate_mode validate_mode);
void dcn42_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
int dcn42_get_power_profile(const struct dc_state *context);
#endif /* _DCN42_RESOURCE_H_ */

View File

@@ -93,6 +93,10 @@ static void dcn42_convert_dc_clock_table_to_soc_bb_clock_table(
}
}
vmin_limit->dispclk_khz = min(dc_clk_table->entries[0].dispclk_mhz * 1000, vmin_limit->dispclk_khz);
/* dispclk is always fine-grain */
dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels >= 2 ? 2 : 1;
dml_clk_table->dispclk.clk_values_khz[0] = 0;
dml_clk_table->dispclk.clk_values_khz[1] = dc_clk_table->entries[dc_clk_table->num_entries_per_clk.num_dispclk_levels - 1].dispclk_mhz * 1000;
}
/* dppclk */
@@ -105,6 +109,10 @@ static void dcn42_convert_dc_clock_table_to_soc_bb_clock_table(
dml_clk_table->dppclk.clk_values_khz[i] = 0;
}
}
/* dppclk is always fine-grain */
dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels >= 2 ? 2 : 1;
dml_clk_table->dppclk.clk_values_khz[0] = 0;
dml_clk_table->dppclk.clk_values_khz[1] = dc_clk_table->entries[dc_clk_table->num_entries_per_clk.num_dppclk_levels - 1].dppclk_mhz * 1000;
}
/* dtbclk */

View File

@@ -4437,6 +4437,7 @@ enum dmub_cmd_replay_general_subtype {
REPLAY_GENERAL_CMD_VIDEO_CONFERENCING,
REPLAY_GENERAL_CMD_SET_CONTINUOUSLY_RESYNC,
REPLAY_GENERAL_CMD_SET_COASTING_VTOTAL_WITHOUT_FRAME_UPDATE,
REPLAY_GENERAL_CMD_LIVE_CAPTURE_WITH_CVT,
};
struct dmub_alpm_auxless_data {

View File

@@ -39,13 +39,14 @@ void dmub_srv_dcn42_regs_init(struct dmub_srv *dmub, struct dc_context *ctx)
void dmub_dcn42_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params)
{
union dmub_fw_boot_options boot_options = {0};
union dmub_fw_boot_options cur_boot_options = {0};
cur_boot_options = dmub_dcn42_get_fw_boot_option(dmub);
if (!dmub->dpia_supported) {
dmub->dpia_supported = dmub_dcn42_get_fw_boot_option(dmub).bits.enable_dpia;
}
boot_options.bits.z10_disable = params->disable_z10;
boot_options.bits.dpia_supported = params->dpia_supported;
boot_options.bits.enable_dpia = cur_boot_options.bits.enable_dpia && !params->disable_dpia;
boot_options.bits.enable_dpia = dmub->dpia_supported && !params->disable_dpia;
boot_options.bits.usb4_cm_version = params->usb4_cm_version;
boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;

View File

@@ -896,6 +896,7 @@ static void build_de_pq(struct pwl_float_data_ex *de_pq,
uint32_t hw_points_num,
const struct hw_x_point *coordinate_x)
{
(void)coordinate_x;
uint32_t i;
struct fixed31_32 output;
struct fixed31_32 *de_pq_table = mod_color_get_table(type_de_pq_table);
@@ -1339,6 +1340,7 @@ static void scale_gamma_dx(struct pwl_float_data *pwl_rgb,
const struct dc_gamma *ramp,
struct dividers dividers)
{
(void)dividers;
uint32_t i;
struct fixed31_32 min = dc_fixpt_zero;
struct fixed31_32 max = dc_fixpt_one;

View File

@@ -114,6 +114,7 @@ static unsigned int calc_duration_in_us_from_v_total(
const struct mod_vrr_params *in_vrr,
unsigned int v_total)
{
(void)in_vrr;
unsigned int duration_in_us =
(unsigned int)(div64_u64(((unsigned long long)(v_total)
* 10000) * stream->timing.h_total,
@@ -218,6 +219,7 @@ static void update_v_total_for_static_ramp(
const struct dc_stream_state *stream,
struct mod_vrr_params *in_out_vrr)
{
(void)core_freesync;
unsigned int v_total = 0;
unsigned int current_duration_in_us =
calc_duration_in_us_from_v_total(
@@ -292,6 +294,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
unsigned int last_render_time_in_us,
struct mod_vrr_params *in_out_vrr)
{
(void)core_freesync;
unsigned int inserted_frame_duration_in_us = 0;
unsigned int mid_point_frames_ceil = 0;
unsigned int mid_point_frames_floor = 0;
@@ -447,6 +450,7 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
unsigned int last_render_time_in_us,
struct mod_vrr_params *in_out_vrr)
{
(void)core_freesync;
bool update = false;
unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
@@ -545,6 +549,7 @@ static bool vrr_settings_require_update(struct core_freesync *core_freesync,
unsigned int max_refresh_in_uhz,
struct mod_vrr_params *in_vrr)
{
(void)core_freesync;
if (in_vrr->state != in_config->state) {
return true;
} else if (in_vrr->state == VRR_STATE_ACTIVE_FIXED &&
@@ -946,6 +951,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
struct dc_info_packet *infopacket,
bool pack_sdp_v1_3)
{
(void)mod_freesync;
/* SPD info packet for FreeSync
* VTEM info packet for HdmiVRR
* Check if Freesync is supported. Return if false. If true,

View File

@@ -501,6 +501,7 @@ static inline void callback_in_ms(uint16_t time, struct mod_hdcp_output *output)
static inline void set_watchdog_in_ms(struct mod_hdcp *hdcp, uint16_t time,
struct mod_hdcp_output *output)
{
(void)hdcp;
output->watchdog_timer_needed = 1;
output->watchdog_timer_delay = time;
}

View File

@@ -9036,6 +9036,8 @@
// base address: 0x40
#define regODM1_OPTC_INPUT_GLOBAL_CONTROL 0x1ada
#define regODM1_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
#define regODM1_OPTC_RSMU_UNDERFLOW 0x1adb
#define regODM1_OPTC_RSMU_UNDERFLOW_BASE_IDX 2
#define regODM1_OPTC_UNDERFLOW_THRESHOLD 0x1adc
#define regODM1_OPTC_UNDERFLOW_THRESHOLD_BASE_IDX 2
#define regODM1_OPTC_DATA_SOURCE_SELECT 0x1add
@@ -9060,6 +9062,8 @@
// base address: 0x80
#define regODM2_OPTC_INPUT_GLOBAL_CONTROL 0x1aea
#define regODM2_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
#define regODM2_OPTC_RSMU_UNDERFLOW 0x1aeb
#define regODM2_OPTC_RSMU_UNDERFLOW_BASE_IDX 2
#define regODM2_OPTC_UNDERFLOW_THRESHOLD 0x1aec
#define regODM2_OPTC_UNDERFLOW_THRESHOLD_BASE_IDX 2
#define regODM2_OPTC_DATA_SOURCE_SELECT 0x1aed
@@ -9084,6 +9088,8 @@
// base address: 0xc0
#define regODM3_OPTC_INPUT_GLOBAL_CONTROL 0x1afa
#define regODM3_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
#define regODM3_OPTC_RSMU_UNDERFLOW 0x1afb
#define regODM3_OPTC_RSMU_UNDERFLOW_BASE_IDX 2
#define regODM3_OPTC_UNDERFLOW_THRESHOLD 0x1afc
#define regODM3_OPTC_UNDERFLOW_THRESHOLD_BASE_IDX 2
#define regODM3_OPTC_DATA_SOURCE_SELECT 0x1afd

View File

@@ -119,6 +119,7 @@ enum pp_clock_type {
PP_ISPXCLK,
OD_SCLK,
OD_MCLK,
OD_FCLK,
OD_VDDC_CURVE,
OD_RANGE,
OD_VDDGFX_OFFSET,
@@ -208,6 +209,7 @@ enum {
enum PP_OD_DPM_TABLE_COMMAND {
PP_OD_EDIT_SCLK_VDDC_TABLE,
PP_OD_EDIT_MCLK_VDDC_TABLE,
PP_OD_EDIT_FCLK_TABLE,
PP_OD_EDIT_CCLK_VDDC_TABLE,
PP_OD_EDIT_VDDC_CURVE,
PP_OD_RESTORE_DEFAULT_TABLE,
@@ -585,8 +587,61 @@ enum amdgpu_metrics_attr_id {
AMDGPU_METRICS_ATTR_ID_GFX_LOW_UTILIZATION_ACC,
AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_TOTAL_ACC,
AMDGPU_METRICS_ATTR_ID_TEMPERATURE_HBM,
AMDGPU_METRICS_ATTR_ID_TEMPERATURE_MID,
AMDGPU_METRICS_ATTR_ID_TEMPERATURE_AID,
AMDGPU_METRICS_ATTR_ID_TEMPERATURE_XCD,
AMDGPU_METRICS_ATTR_ID_LABEL_VERSION,
AMDGPU_METRICS_ATTR_ID_NODE_ID,
AMDGPU_METRICS_ATTR_ID_NODE_TEMP_RETIMER,
AMDGPU_METRICS_ATTR_ID_NODE_TEMP_IBC,
AMDGPU_METRICS_ATTR_ID_NODE_TEMP_IBC_2,
AMDGPU_METRICS_ATTR_ID_NODE_TEMP_VDD18_VR,
AMDGPU_METRICS_ATTR_ID_NODE_TEMP_04_HBM_B_VR,
AMDGPU_METRICS_ATTR_ID_NODE_TEMP_04_HBM_D_VR,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_SOCIO_A,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_SOCIO_C,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_X0,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_X1,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_HBM_B,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_HBM_D,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_04_HBM_B,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_04_HBM_D,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_HBM_B,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_HBM_D,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_075_HBM_B,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_075_HBM_D,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_11_GTA_A,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_11_GTA_C,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDAN_075_GTA_A,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDAN_075_GTA_C,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDCR_075_UCIE,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_065_UCIEAA,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_065_UCIEAM_A,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDIO_065_UCIEAM_C,
AMDGPU_METRICS_ATTR_ID_VR_TEMP_VDDAN_075,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_FPGA,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_FRONT,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_BACK,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_OAM7,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_IBC,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_UFPGA,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_OAM1,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_OAM_0_1_HSC,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_OAM_2_3_HSC,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_OAM_4_5_HSC,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_OAM_6_7_HSC,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_FPGA_0V72_VR,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_UBB_FPGA_3V3_VR,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_RETIMER_0_1_0V9_VR,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_RETIMER_4_5_0V9_VR,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_RETIMER_2_3_0V9_VR,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_RETIMER_6_7_0V9_VR,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_IBC_HSC,
AMDGPU_METRICS_ATTR_ID_SYSTEM_TEMP_IBC,
AMDGPU_METRICS_ATTR_ID_MAX,
};
@@ -1839,4 +1894,16 @@ enum amdgpu_xgmi_link_status {
AMDGPU_XGMI_LINK_NA = 2,
};
struct amdgpu_gpuboard_temp_metrics_v1_1 {
struct metrics_table_header common_header;
int attr_count;
struct gpu_metrics_attr metrics_attrs[];
};
struct amdgpu_baseboard_temp_metrics_v1_1 {
struct metrics_table_header common_header;
int attr_count;
struct gpu_metrics_attr metrics_attrs[];
};
#endif

View File

@@ -680,6 +680,8 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* - minimum(not available for Vega20 and Navi1x) and maximum memory
* clock labeled OD_MCLK
*
* - minimum and maximum fabric clock labeled OD_FCLK (SMU13)
*
* - three <frequency, voltage> points labeled OD_VDDC_CURVE.
* They can be used to calibrate the sclk voltage curve. This is
* available for Vega20 and NV1X.
@@ -715,10 +717,11 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* - First select manual using power_dpm_force_performance_level
*
* - For clock frequency setting, enter a new value by writing a
* string that contains "s/m index clock" to the file. The index
* string that contains "s/m/f index clock" to the file. The index
* should be 0 if to set minimum clock. And 1 if to set maximum
* clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz.
* "m 1 800" will update maximum mclk to be 800Mhz. For core
* "m 1 800" will update maximum mclk to be 800Mhz. "f 1 1600" will
* update maximum fabric clock to be 1600Mhz. For core
* clocks on VanGogh, the string contains "p core index clock".
* E.g., "p 2 0 800" would set the minimum core clock on core
* 2 to 800Mhz.
@@ -768,6 +771,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
type = PP_OD_EDIT_CCLK_VDDC_TABLE;
else if (*buf == 'm')
type = PP_OD_EDIT_MCLK_VDDC_TABLE;
else if (*buf == 'f')
type = PP_OD_EDIT_FCLK_TABLE;
else if (*buf == 'r')
type = PP_OD_RESTORE_DEFAULT_TABLE;
else if (*buf == 'c')
@@ -843,9 +848,10 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
int size = 0;
int ret;
enum pp_clock_type od_clocks[6] = {
enum pp_clock_type od_clocks[] = {
OD_SCLK,
OD_MCLK,
OD_FCLK,
OD_VDDC_CURVE,
OD_RANGE,
OD_VDDGFX_OFFSET,
@@ -857,10 +863,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
if (ret)
return ret;
for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
if (ret)
break;
for (clk_index = 0 ; clk_index < ARRAY_SIZE(od_clocks) ; clk_index++) {
amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
}
if (size == 0)

View File

@@ -47,6 +47,7 @@
#include "smu_v14_0_0_ppt.h"
#include "smu_v14_0_2_ppt.h"
#include "smu_v15_0_0_ppt.h"
#include "smu_v15_0_8_ppt.h"
#include "amd_pcie.h"
/*
@@ -802,6 +803,10 @@ static int smu_set_funcs(struct amdgpu_device *adev)
case IP_VERSION(15, 0, 0):
smu_v15_0_0_set_ppt_funcs(smu);
break;
case IP_VERSION(15, 0, 8):
smu_v15_0_8_set_ppt_funcs(smu);
smu->od_enabled = true;
break;
default:
return -EINVAL;
}
@@ -2965,6 +2970,7 @@ int smu_get_power_limit(void *handle,
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
case IP_VERSION(15, 0, 8):
ret = smu_get_asic_power_limits(smu,
&smu->current_power_limit,
NULL, NULL, NULL);
@@ -3056,6 +3062,8 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
clk_type = SMU_OD_SCLK; break;
case OD_MCLK:
clk_type = SMU_OD_MCLK; break;
case OD_FCLK:
clk_type = SMU_OD_FCLK; break;
case OD_VDDC_CURVE:
clk_type = SMU_OD_VDDC_CURVE; break;
case OD_RANGE:

View File

@@ -0,0 +1,295 @@
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef SMU_15_0_8_DRIVER_IF_H
#define SMU_15_0_8_DRIVER_IF_H
//I2C Interface
#define NUM_I2C_CONTROLLERS 8
#define I2C_CONTROLLER_ENABLED 1
#define I2C_CONTROLLER_DISABLED 0
#define MAX_SW_I2C_COMMANDS 24
typedef enum {
I2C_CONTROLLER_PORT_0,
I2C_CONTROLLER_PORT_COUNT,
} I2cControllerPort_e;
typedef enum {
/* 50 Kbits/s not supported anymore! */
UNSUPPORTED_1,
/* 100 Kbits/s */
I2C_SPEED_STANDARD_100K,
/* 400 Kbits/s */
I2C_SPEED_FAST_400K,
/* 1 Mbits/s (in fast mode) */
I2C_SPEED_FAST_PLUS_1M,
/* 1 Mbits/s (in high speed mode) not supported anymore!*/
UNSUPPORTED_2,
/* 2.3 Mbits/s not supported anymore! */
UNSUPPORTED_3,
I2C_SPEED_COUNT,
} I2cSpeed_e;
typedef enum {
I2C_CMD_READ,
I2C_CMD_WRITE,
I2C_CMD_COUNT,
} I2cCmdType_e;
#define CMDCONFIG_STOP_BIT 0
#define CMDCONFIG_RESTART_BIT 1
/* bit should be 0 for read, 1 for write */
#define CMDCONFIG_READWRITE_BIT 2
#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT)
#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT)
#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT)
/* 64 Bit register offsets for PPSMC_MSG_McaBankDumpDW, PPSMC_MSG_McaBankCeDumpDW messages
* eg to read MCA_BANK_OFFSET_SYND for CE index, call PPSMC_MSG_McaBankCeDumpDW twice,
* (index << 16 + MCA_BANK_OFFSET_SYND*8) argument for 1st DWORD, and
* ((index << 16 ) + MCA_BANK_OFFSET_SYND*8 + 4) argument for 2nd DWORD */
typedef enum {
MCA_BANK_OFFSET_CTL = 0,
MCA_BANK_OFFSET_STATUS = 1,
MCA_BANK_OFFSET_ADDR = 2,
MCA_BANK_OFFSET_MISC = 3,
MCA_BANK_OFFSET_IPID = 5,
MCA_BANK_OFFSET_SYND = 6,
MCA_BANK_OFFSET_MAX = 16,
} MCA_BANK_OFFSET_e;
/*
 * Firmware MP1 (AID) MCA error codes, stored in the
 * MCA_MP_MP1:MCMP1_SYNDT0 error information field.
 * Values are part of the PMFW interface and must not be renumbered.
 */
typedef enum {
/* MMHUB */
CODE_DAGB0 = 0,
CODE_DAGB1 = 1,
CODE_DAGB2 = 2,
CODE_DAGB3 = 3,
CODE_DAGB4 = 4,
CODE_EA0 = 5,
CODE_EA1 = 6,
CODE_EA2 = 7,
CODE_EA3 = 8,
CODE_EA4 = 9,
CODE_UTCL2_ROUTER = 10,
CODE_VML2 = 11,
CODE_VML2_WALKER = 12,
CODE_MMCANE = 13,
/* VCN VCPU */
CODE_VIDD = 14,
CODE_VIDV = 15,
/* VCN JPEG */
CODE_JPEG0S = 16,
CODE_JPEG0D = 17,
CODE_JPEG1S = 18,
CODE_JPEG1D = 19,
CODE_JPEG2S = 20,
CODE_JPEG2D = 21,
CODE_JPEG3S = 22,
CODE_JPEG3D = 23,
CODE_JPEG4S = 24,
CODE_JPEG4D = 25,
CODE_JPEG5S = 26,
CODE_JPEG5D = 27,
CODE_JPEG6S = 28,
CODE_JPEG6D = 29,
CODE_JPEG7S = 30,
CODE_JPEG7D = 31,
/* VCN MMSCH */
CODE_MMSCHD = 32,
/* SDMA */
CODE_SDMA0 = 33,
CODE_SDMA1 = 34,
CODE_SDMA2 = 35,
CODE_SDMA3 = 36,
/* SOC */
CODE_HDP = 37,
CODE_ATHUB = 38,
CODE_IH = 39,
CODE_XHUB_POISON = 40,
CODE_SMN_SLVERR = 41,
CODE_WDT = 42,
CODE_UNKNOWN = 43,
CODE_DMA = 44,
/* Number of defined codes; not itself a valid error code */
CODE_COUNT = 45,
} ERR_CODE_e;
/*
 * Firmware MP5 (XCD) MCA error codes, stored in the
 * MCA_MP_MP5:MCMP5_SYNDT0 error information field.
 * Values are part of the PMFW interface and must not be renumbered.
 */
typedef enum {
/* SH POISON FED */
SH_FED_CODE = 0,
/* GCEA Pin UE_ERR regs */
GCEA_CODE = 1,
SQ_CODE = 2,
LDS_CODE = 3,
GDS_CODE = 4,
SP0_CODE = 5,
SP1_CODE = 6,
TCC_CODE = 7,
TCA_CODE = 8,
TCX_CODE = 9,
CPC_CODE = 10,
CPF_CODE = 11,
CPG_CODE = 12,
SPI_CODE = 13,
RLC_CODE = 14,
/* GCEA Pin, UE_EDC regs */
SQC_CODE = 15,
TA_CODE = 16,
TD_CODE = 17,
TCP_CODE = 18,
TCI_CODE = 19,
/* GC Router */
GC_ROUTER_CODE = 20,
VML2_CODE = 21,
VML2_WALKER_CODE = 22,
ATCL2_CODE = 23,
GC_CANE_CODE = 24,
/* SOC error codes 41-43 are common with ERR_CODE_e */
MP5_CODE_SMN_SLVERR = CODE_SMN_SLVERR,
MP5_CODE_UNKNOWN = CODE_UNKNOWN,
} GC_ERROR_CODE_e;
/* SW I2C Command Table */
typedef struct {
/* Return data for read. Data to send for write*/
uint8_t ReadWriteData;
/* Includes whether associated command should have a stop or restart command,
* and is a read or write */
uint8_t CmdConfig;
} SwI2cCmd_t;
/* SW I2C request table: one bus transaction of up to MAX_SW_I2C_COMMANDS steps */
typedef struct {
/* I2C controller to use: CKSVII2C0 (0) or CKSVII2C1 (1) */
uint8_t I2CcontrollerPort;
/* Bus speed; use an I2cSpeed_e value */
uint8_t I2CSpeed;
/* Slave address of device */
uint8_t SlaveAddress;
/* Number of valid entries in SwI2cCmds */
uint8_t NumCmds;
SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS];
} SwI2cRequest_t;
typedef struct {
SwI2cRequest_t SwI2cRequest;
uint32_t Spare[8];
/* SMU internal use */
uint32_t MmHubPadding[8];
} SwI2cRequestExternal_t;
typedef enum {
PPCLK_UCLK,
PPCLK_COUNT,
} PPCLK_e;
typedef enum {
GPIO_INT_POLARITY_ACTIVE_LOW,
GPIO_INT_POLARITY_ACTIVE_HIGH,
} GpioIntPolarity_e;
/* TODO confirm if this is used in MI300 PPSMC_MSG_SetUclkDpmMode */
typedef enum {
UCLK_DPM_MODE_BANDWIDTH,
UCLK_DPM_MODE_LATENCY,
} UCLK_DPM_MODE_e;
/* AVFS/PSM debug telemetry for a MID die (counts and converted voltages) */
typedef struct {
/* 2 AVFS.PSM chains */
uint16_t AvgPsmCount_Chain0[13];
uint16_t AvgPsmCount_Chain1[15];
uint16_t MinPsmCount_Chain0[13];
uint16_t MinPsmCount_Chain1[15];
float MaxTemperature;
/* For voltage conversions, these are the array indexes
 * 0:SOCIO
 * 1:065_UCIE
 * 2:075_UCIE
 * 3:11_GTA
 * 4:075_GTA */
float MinPsmVoltage[5];
float AvgPsmVoltage[5];
} AvfsDebugTableMid_t;
/* AVFS/PSM debug telemetry for an AID die */
typedef struct {
/* 7 AVFS.PSM chains - not including TRO */
uint16_t AvgPsmCount_Chain0[15];
uint16_t AvgPsmCount_Chain1[15];
uint16_t AvgPsmCount_Chain2[13];
uint16_t AvgPsmCount_Chain3[13];
uint16_t AvgPsmCount_Chain4[15];
uint16_t AvgPsmCount_Chain5[15];
uint16_t AvgPsmCount_Chain6[5];
uint16_t MinPsmCount_Chain0[15];
uint16_t MinPsmCount_Chain1[15];
uint16_t MinPsmCount_Chain2[13];
uint16_t MinPsmCount_Chain3[13];
uint16_t MinPsmCount_Chain4[15];
uint16_t MinPsmCount_Chain5[15];
uint16_t MinPsmCount_Chain6[5];
float MaxTemperature;
/* For voltage conversions, these are the array indexes
 * 0:VDDX */
float MinPsmVoltage;
float AvgPsmVoltage;
} AvfsDebugTableAid_t;
/* AVFS/PSM debug telemetry for an XCD (per-chain counts and voltages) */
typedef struct {
/* 0-27 GFX, 28-29 SOC */
uint16_t avgPsmCount[30];
uint16_t minPsmCount[30];
float avgPsmVoltage[30];
float minPsmVoltage[30];
} AvfsDebugTableXcd_t;
/* Defines used for IH-based thermal interrupts to GFX driver - A/X only */
#define IH_INTERRUPT_ID_TO_DRIVER 0xFE
#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
#define IH_INTERRUPT_VFFLR_INT 0xA
/* thermal over-temp mask defines for IH interrupt to host */
#define THROTTLER_PROCHOT_BIT 0
#define THROTTLER_RESERVED 1
/* AID, XCD, CCD throttling */
#define THROTTLER_THERMAL_SOCKET_BIT 2
/* VRHOT */
#define THROTTLER_THERMAL_VR_BIT 3
#define THROTTLER_THERMAL_HBM_BIT 4
/* UEs are always reported, set flag to 0 to prevent clearing of UEs */
#define ClearMcaOnRead_UE_FLAG_MASK 0x1
/* Enable CE logging and clearing to driver */
#define ClearMcaOnRead_CE_POLL_MASK 0x2
/* AID MMHUB client IP CE Logging and clearing */
#define ClearMcaOnRead_MMHUB_POLL_MASK 0x4
#endif

View File

@@ -0,0 +1,427 @@
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef SMU_15_0_8_PMFW_H
#define SMU_15_0_8_PMFW_H
#define NUM_VCLK_DPM_LEVELS 4
#define NUM_DCLK_DPM_LEVELS 4
#define NUM_SOCCLK_DPM_LEVELS 4
#define NUM_LCLK_DPM_LEVELS 4
#define NUM_UCLK_DPM_LEVELS 4
#define NUM_FCLK_DPM_LEVELS 4
#define NUM_XGMI_DPM_LEVELS 2
#define NUM_PCIE_BITRATES 4
#define NUM_XGMI_BITRATES 4
#define NUM_XGMI_WIDTHS 3
#define NUM_GFX_P2S_TABLES 8
#define NUM_PSM_DIDT_THRESHOLDS 3
#define NUM_XCD_XVMIN_VMIN_THRESHOLDS 3
#define PRODUCT_MODEL_NUMBER_LEN 20
#define PRODUCT_NAME_LEN 64
#define PRODUCT_SERIAL_LEN 20
#define PRODUCT_MANUFACTURER_NAME_LEN 32
#define PRODUCT_FRU_ID_LEN 32
//Feature ID list
#define FEATURE_ID_DATA_CALCULATION 1
#define FEATURE_ID_DPM_FCLK 2
#define FEATURE_ID_DPM_GFXCLK 3
#define FEATURE_ID_DPM_SPARE_4 4
#define FEATURE_ID_DPM_SPARE_5 5
#define FEATURE_ID_DPM_UCLK 6
#define FEATURE_ID_DPM_SPARE_7 7
#define FEATURE_ID_DPM_XGMI 8
#define FEATURE_ID_DS_FCLK 9
#define FEATURE_ID_DS_GFXCLK 10
#define FEATURE_ID_DS_LCLK 11
#define FEATURE_ID_DS_MP0CLK 12
#define FEATURE_ID_DS_MP1CLK 13
#define FEATURE_ID_DS_MPIOCLK 14
#define FEATURE_ID_DS_SOCCLK 15
#define FEATURE_ID_DS_VCN 16
#define FEATURE_ID_PPT 17
#define FEATURE_ID_TDC 18
#define FEATURE_ID_THERMAL 19
#define FEATURE_ID_SOC_PCC 20
#define FEATURE_ID_PROCHOT 21
#define FEATURE_ID_XVMIN0_VMIN_AID 22
#define FEATURE_ID_XVMIN1_DD_AID 23
#define FEATURE_ID_XVMIN0_VMIN_XCD 24
#define FEATURE_ID_XVMIN1_DD_XCD 25
#define FEATURE_ID_FW_CTF 26
#define FEATURE_ID_MGCG 27
#define FEATURE_ID_PSI7 28
#define FEATURE_ID_XGMI_PER_LINK_PWR_DOWN 29
#define FEATURE_ID_SOC_DC_RTC 30
#define FEATURE_ID_GFX_DC_RTC 31
#define FEATURE_ID_DVM_MIN_PSM 32
#define FEATURE_ID_PRC 33
#define FEATURE_ID_PSM_DIDT 34
#define FEATURE_ID_PIT 35
#define FEATURE_ID_DVO 36
#define FEATURE_ID_XVMIN_CLKSTOP_DS 37
#define FEATURE_ID_HBM_THROTTLE_CTRL 38
#define FEATURE_ID_DPM_GL2CLK 39
#define FEATURE_ID_GC_CAC_EDC 40
#define FEATURE_ID_DS_DMABECLK 41
#define FEATURE_ID_DS_MPIFOECLK 42
#define FEATURE_ID_DS_MPRASCLK 43
#define FEATURE_ID_DS_MPNHTCLK 44
#define FEATURE_ID_DS_FIOCLK 45
#define FEATURE_ID_DS_DXIOCLK 46
#define FEATURE_ID_PCC 47
#define FEATURE_ID_OCP 48
#define FEATURE_ID_TRO 49
#define FEATURE_ID_GL2_CAC_EDC 50
#define FEATURE_ID_SPARE_51 51
#define FEATURE_ID_GL2_CGCG 52
#define FEATURE_ID_XCAC 53
#define FEATURE_ID_DS_GL2CLK 54
#define FEATURE_ID_FCS_VIN_PCC 55
#define FEATURE_ID_FCS_VDDX_OCP_WARN 56
#define FEATURE_ID_FCS_PWRBRK 57
#define FEATURE_ID_DF_CSTATE 58
#define FEATURE_ID_ARO 59
#define FEATURE_ID_PS_PsPowerLimit 60
#define FEATURE_ID_PS_PsPowerFloor 61
#define FEATURE_ID_OCPWARNRC 62
#define FEATURE_ID_XGMI_FOLDING 63
#define FEATURE_ID_SMU_CG 64
#define NUM_FEATURES 65
//MGCG Feature ID List
#define WAFL_CG 0
#define SMU_FUSE_CG_DEEPSLEEP 1
#define SMUIO_CG 2
#define RSMU_MGCG 3
#define SMU_CLK_MGCG 4
#define MP5_CG 5
#define UMC_CG 6
#define WAFL0_CLK 7
#define WAFL1_CLK 8
#define VCN_MGCG 9
#define GL2_MGCG 10
#define MGCG_NUM_FEATURES 11
/* enum for MPIO PCIe gen speed msgs */
typedef enum {
PCIE_LINK_SPEED_INDEX_TABLE_GEN1,
PCIE_LINK_SPEED_INDEX_TABLE_GEN2,
PCIE_LINK_SPEED_INDEX_TABLE_GEN3,
PCIE_LINK_SPEED_INDEX_TABLE_GEN4,
PCIE_LINK_SPEED_INDEX_TABLE_GEN5,
PCIE_LINK_SPEED_INDEX_TABLE_GEN6,
PCIE_LINK_SPEED_INDEX_TABLE_GEN6_ESM,
PCIE_LINK_SPEED_INDEX_TABLE_COUNT
} PCIE_LINK_SPEED_INDEX_TABLE_e;
typedef enum {
GFX_GUARDBAND_OFFSET_0,
GFX_GUARDBAND_OFFSET_1,
GFX_GUARDBAND_OFFSET_2,
GFX_GUARDBAND_OFFSET_3,
GFX_GUARDBAND_OFFSET_4,
GFX_GUARDBAND_OFFSET_5,
GFX_GUARDBAND_OFFSET_6,
GFX_GUARDBAND_OFFSET_7,
GFX_GUARDBAND_OFFSET_COUNT
} GFX_GUARDBAND_OFFSET_e;
typedef enum {
GFX_DVM_MARGINHI_0,
GFX_DVM_MARGINHI_1,
GFX_DVM_MARGINHI_2,
GFX_DVM_MARGINHI_3,
GFX_DVM_MARGINHI_4,
GFX_DVM_MARGINHI_5,
GFX_DVM_MARGINHI_6,
GFX_DVM_MARGINHI_7,
GFX_DVM_MARGINLO_0,
GFX_DVM_MARGINLO_1,
GFX_DVM_MARGINLO_2,
GFX_DVM_MARGINLO_3,
GFX_DVM_MARGINLO_4,
GFX_DVM_MARGINLO_5,
GFX_DVM_MARGINLO_6,
GFX_DVM_MARGINLO_7,
GFX_DVM_MARGIN_COUNT
} GFX_DVM_MARGIN_e;
typedef enum{
SYSTEM_TEMP_UBB_FPGA,
SYSTEM_TEMP_UBB_FRONT,
SYSTEM_TEMP_UBB_BACK,
SYSTEM_TEMP_UBB_OAM7,
SYSTEM_TEMP_UBB_IBC,
SYSTEM_TEMP_UBB_UFPGA,
SYSTEM_TEMP_UBB_OAM1,
SYSTEM_TEMP_OAM_0_1_HSC,
SYSTEM_TEMP_OAM_2_3_HSC,
SYSTEM_TEMP_OAM_4_5_HSC,
SYSTEM_TEMP_OAM_6_7_HSC,
SYSTEM_TEMP_UBB_FPGA_0V72_VR,
SYSTEM_TEMP_UBB_FPGA_3V3_VR,
SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR,
SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR,
SYSTEM_TEMP_RETIMER_0_1_0V9_VR,
SYSTEM_TEMP_RETIMER_4_5_0V9_VR,
SYSTEM_TEMP_RETIMER_2_3_0V9_VR,
SYSTEM_TEMP_RETIMER_6_7_0V9_VR,
SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR,
SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR,
SYSTEM_TEMP_IBC_HSC,
SYSTEM_TEMP_IBC,
SYSTEM_TEMP_MAX_ENTRIES = 32
} SYSTEM_TEMP_e;
typedef enum{
NODE_TEMP_RETIMER,
NODE_TEMP_IBC_TEMP,
NODE_TEMP_IBC_2_TEMP,
NODE_TEMP_VDD18_VR_TEMP,
NODE_TEMP_04_HBM_B_VR_TEMP,
NODE_TEMP_04_HBM_D_VR_TEMP,
NODE_TEMP_MAX_TEMP_ENTRIES = 12
} NODE_TEMP_e;
typedef enum {
SVI_PLANE_VDDCR_X0_TEMP,
SVI_PLANE_VDDCR_X1_TEMP,
SVI_PLANE_VDDIO_HBM_B_TEMP,
SVI_PLANE_VDDIO_HBM_D_TEMP,
SVI_PLANE_VDDIO_04_HBM_B_TEMP,
SVI_PLANE_VDDIO_04_HBM_D_TEMP,
SVI_PLANE_VDDCR_HBM_B_TEMP,
SVI_PLANE_VDDCR_HBM_D_TEMP,
SVI_PLANE_VDDCR_075_HBM_B_TEMP,
SVI_PLANE_VDDCR_075_HBM_D_TEMP,
SVI_PLANE_VDDIO_11_GTA_A_TEMP,
SVI_PLANE_VDDIO_11_GTA_C_TEMP,
SVI_PLANE_VDDAN_075_GTA_A_TEMP,
SVI_PLANE_VDDAN_075_GTA_C_TEMP,
SVI_PLANE_VDDCR_075_UCIE_TEMP,
SVI_PLANE_VDDIO_065_UCIEAA_TEMP,
SVI_PLANE_VDDIO_065_UCIEAM_A_TEMP,
SVI_PLANE_VDDIO_065_UCIEAM_C_TEMP,
SVI_PLANE_VDDCR_SOCIO_A_TEMP,
SVI_PLANE_VDDCR_SOCIO_C_TEMP,
SVI_PLANE_VDDAN_075_TEMP,
SVI_MAX_TEMP_ENTRIES, //22
} SVI_TEMP_e;
typedef enum{
SYSTEM_POWER_UBB_POWER,
SYSTEM_POWER_UBB_POWER_THRESHOLD,
SYSTEM_POWER_MAX_ENTRIES_WO_RESERVED,
SYSTEM_POWER_MAX_ENTRIES = 4
} SYSTEM_POWER_e;
#define SMU_METRICS_TABLE_VERSION 0xF
/*
 * Main per-socket metrics table shared with PMFW
 * (versioned by SMU_METRICS_TABLE_VERSION above).
 * Packed and 4-byte aligned to match the firmware layout;
 * do not reorder or resize fields. Fields with an "Acc" suffix
 * are accumulated counters.
 */
typedef struct __attribute__((packed, aligned(4))) {
uint64_t AccumulationCounter;
//TEMPERATURE
uint32_t MaxSocketTemperature;
uint32_t MaxVrTemperature;
uint32_t HbmTemperature[12];
uint64_t MaxSocketTemperatureAcc;
uint64_t MaxVrTemperatureAcc;
uint64_t HbmTemperatureAcc[12];
uint32_t MidTemperature[2];
uint32_t AidTemperature[2];
uint32_t XcdTemperature[8];
//POWER
uint32_t SocketPowerLimit;
uint32_t SocketPower;
//ENERGY
uint64_t Timestamp;
uint64_t SocketEnergyAcc;
uint64_t HbmEnergyAcc;
//FREQUENCY
uint32_t GfxclkFrequencyLimit;
uint32_t FclkFrequency[2];
uint32_t UclkFrequency[2];
uint64_t GfxclkFrequencyAcc[8];
uint32_t GfxclkFrequency[8];
uint32_t SocclkFrequency[2];
uint32_t VclkFrequency[4];
uint32_t DclkFrequency[4];
uint32_t LclkFrequency[2];
//XGMI:
uint32_t XgmiWidth;
uint32_t XgmiBitrate;
uint64_t XgmiReadBandwidthAcc;
uint64_t XgmiWriteBandwidthAcc;
//ACTIVITY:
uint32_t SocketGfxBusy;
uint32_t DramBandwidthUtilization;
uint64_t SocketGfxBusyAcc;
uint64_t DramBandwidthAcc;
uint32_t MaxDramBandwidth;
uint64_t DramBandwidthUtilizationAcc;
uint64_t PcieBandwidthAcc[2];
//THROTTLERS
uint64_t ProchotResidencyAcc;
uint64_t PptResidencyAcc;
uint64_t SocketThmResidencyAcc;
uint64_t VrThmResidencyAcc;
uint64_t HbmThmResidencyAcc;
//PCIE BW Data and error count
uint32_t PcieBandwidth[2];
uint64_t PCIeL0ToRecoveryCountAcc;
uint64_t PCIenReplayAAcc;
uint64_t PCIenReplayARolloverCountAcc;
uint64_t PCIeNAKSentCountAcc;
uint64_t PCIeNAKReceivedCountAcc;
uint64_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated
// VCN/JPEG ACTIVITY
uint32_t VcnBusy[4];
uint32_t JpegBusy[40];
// PCIE LINK Speed and width
uint32_t PCIeLinkSpeed;
uint32_t PCIeLinkWidth;
// PER XCD ACTIVITY
uint32_t GfxBusy[8];
uint64_t GfxBusyAcc[8];
//NVML-Parity: Total App Clock Counter
uint64_t GfxclkBelowHostLimitPptAcc[8];
uint64_t GfxclkBelowHostLimitThmAcc[8];
uint64_t GfxclkBelowHostLimitTotalAcc[8];
uint64_t GfxclkLowUtilizationAcc[8];
} MetricsTable_t;
#define SMU_SYSTEM_METRICS_TABLE_VERSION 0x1
#pragma pack(push, 4)
/*
 * System/baseboard metrics table shared with PMFW
 * (versioned by SMU_SYSTEM_METRICS_TABLE_VERSION above).
 * 4-byte packed to match the firmware layout.
 */
typedef struct {
uint64_t AccumulationCounter; // Last update timestamp
uint16_t LabelVersion; //Defaults to 0.
uint16_t NodeIdentifier;
int16_t SystemTemperatures[SYSTEM_TEMP_MAX_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF
int16_t NodeTemperatures[NODE_TEMP_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF
int16_t VrTemperatures[SVI_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius, SVI_MAX_TEMP_ENTRIES (22) entries
int16_t spare[7];
//NPM: NODE POWER MANAGEMENT
uint32_t NodePowerLimit;
uint32_t NodePower;
uint32_t GlobalPPTResidencyAcc;
uint16_t SystemPower[SYSTEM_POWER_MAX_ENTRIES]; // UBB Current Power and Power Threshold
} SystemMetricsTable_t;
#pragma pack(pop)
#define SMU_VF_METRICS_TABLE_VERSION 0x5
/*
 * Per-VF (SR-IOV virtual function) metrics; packed, 4-byte aligned
 * to match the firmware layout. "Acc" fields are accumulated counters.
 */
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
uint32_t InstGfxclk_TargFreq;
uint64_t AccGfxclk_TargFreq;
uint64_t AccGfxRsmuDpm_Busy;
uint64_t AccGfxclkBelowHostLimit;
} VfMetricsTable_t;
/*
 * FRU product information. Fixed-length byte fields sized by the
 * PRODUCT_*_LEN constants; NOTE(review): presumably not guaranteed
 * NUL-terminated — confirm before treating as C strings.
 */
typedef struct __attribute__((aligned(4))) {
uint8_t ModelNumber[PRODUCT_MODEL_NUMBER_LEN];
uint8_t Name[PRODUCT_NAME_LEN];
uint8_t Serial[PRODUCT_SERIAL_LEN];
uint8_t ManufacturerName[PRODUCT_MANUFACTURER_NAME_LEN];
uint8_t FruId[PRODUCT_FRU_ID_LEN];
} FRUProductInfo_t;
#define SMU_STATIC_METRICS_TABLE_VERSION 0x1
#pragma pack(push, 4)
/*
 * Static (boot-time) metrics table shared with PMFW
 * (versioned by SMU_STATIC_METRICS_TABLE_VERSION above).
 * 4-byte packed to match the firmware layout.
 */
typedef struct {
//FRU PRODUCT INFO
FRUProductInfo_t ProductInfo; //from i2c
//POWER
uint32_t MaxSocketPowerLimit;
//FREQUENCY RANGE
uint32_t MaxGfxclkFrequency;
uint32_t MinGfxclkFrequency;
uint32_t MaxFclkFrequency;
uint32_t MinFclkFrequency;
uint32_t MaxGl2clkFrequency;
uint32_t MinGl2clkFrequency;
uint32_t UclkFrequencyTable[4];
uint32_t SocclkFrequency;
uint32_t LclkFrequency;
uint32_t VclkFrequency;
uint32_t DclkFrequency;
//CTF limits
uint32_t CTFLimit_MID;
uint32_t CTFLimit_AID;
uint32_t CTFLimit_XCD;
uint32_t CTFLimit_HBM;
//Thermal Throttling limits
uint32_t ThermalLimit_MID;
uint32_t ThermalLimit_AID;
uint32_t ThermalLimit_XCD;
uint32_t ThermalLimit_HBM;
//PSNs (public serial numbers per die)
uint64_t PublicSerialNumber_MID[2];
uint64_t PublicSerialNumber_AID[2];
uint64_t PublicSerialNumber_XCD[8];
//XGMI
uint32_t MaxXgmiWidth;
uint32_t MaxXgmiBitrate;
// Telemetry
uint32_t InputTelemetryVoltageInmV;
// General info
uint32_t pldmVersion[2];
uint32_t PPT1Max;
uint32_t PPT1Min;
uint32_t PPT1Default;
} StaticMetricsTable_t;
#pragma pack(pop)
#endif

View File

@@ -0,0 +1,100 @@
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef SMU_15_0_8_PPSMC_H
#define SMU_15_0_8_PPSMC_H
/*
 * Driver <-> SMU (PPSMC) message interface for SMU v15.0.8.
 * The numeric values below form a firmware ABI: they must stay in sync
 * with the SMU firmware and must never be renumbered.
 */
/* SMU Response Codes */
#define PPSMC_Result_OK 0x1
#define PPSMC_Result_Failed 0xFF
#define PPSMC_Result_UnknownCmd 0xFE
#define PPSMC_Result_CmdRejectedPrereq 0xFD
#define PPSMC_Result_CmdRejectedBusy 0xFC
/* Message Definitions */
#define PPSMC_MSG_TestMessage 0x1
#define PPSMC_MSG_GetSmuVersion 0x2
#define PPSMC_MSG_GfxDriverReset 0x3
#define PPSMC_MSG_GetDriverIfVersion 0x4
#define PPSMC_MSG_EnableAllSmuFeatures 0x5
#define PPSMC_MSG_GetMetricsVersion 0x6
#define PPSMC_MSG_GetMetricsTable 0x7
#define PPSMC_MSG_GetEnabledSmuFeatures 0x8
#define PPSMC_MSG_SetDriverDramAddr 0x9 //ARG0: low address, ARG1: high address
#define PPSMC_MSG_SetToolsDramAddr 0xA //ARG0: low address, ARG1: high address
/* 0xB is intentionally unused (was SetSystemVirtualDramAddr). */
//#define PPSMC_MSG_SetSystemVirtualDramAddr 0xB
#define PPSMC_MSG_SetSoftMaxByFreq 0xC
#define PPSMC_MSG_SetPptLimit 0xD
#define PPSMC_MSG_GetPptLimit 0xE
#define PPSMC_MSG_DramLogSetDramAddr 0xF //ARG0: low address, ARG1: high address, ARG2: size
#define PPSMC_MSG_HeavySBR 0x10
#define PPSMC_MSG_DFCstateControl 0x11
#define PPSMC_MSG_GfxDriverResetRecovery 0x12
#define PPSMC_MSG_TriggerVFFLR 0x13
#define PPSMC_MSG_SetSoftMinGfxClk 0x14
#define PPSMC_MSG_SetSoftMaxGfxClk 0x15
#define PPSMC_MSG_PrepareForDriverUnload 0x16
#define PPSMC_MSG_QueryValidMcaCount 0x17
#define PPSMC_MSG_McaBankDumpDW 0x18
#define PPSMC_MSG_ClearMcaOnRead 0x19
#define PPSMC_MSG_QueryValidMcaCeCount 0x1A
#define PPSMC_MSG_McaBankCeDumpDW 0x1B
#define PPSMC_MSG_SelectPLPDMode 0x1C
#define PPSMC_MSG_SetThrottlingPolicy 0x1D
#define PPSMC_MSG_ResetSDMA 0x1E
#define PPSMC_MSG_GetRasTableVersion 0x1F
#define PPSMC_MSG_GetRmaStatus 0x20
#define PPSMC_MSG_GetBadPageCount 0x21
#define PPSMC_MSG_GetBadPageMcaAddress 0x22
#define PPSMC_MSG_GetBadPagePaAddress 0x23
#define PPSMC_MSG_SetTimestamp 0x24
#define PPSMC_MSG_GetTimestamp 0x25
#define PPSMC_MSG_GetRasPolicy 0x26
#define PPSMC_MSG_GetBadPageIpIdLoHi 0x27
#define PPSMC_MSG_EraseRasTable 0x28
#define PPSMC_MSG_GetStaticMetricsTable 0x29
#define PPSMC_MSG_ResetVfArbitersByIndex 0x2A
#define PPSMC_MSG_GetBadPageSeverity 0x2B
#define PPSMC_MSG_GetSystemMetricsTable 0x2C
#define PPSMC_MSG_GetSystemMetricsVersion 0x2D
#define PPSMC_MSG_ResetVCN 0x2E
#define PPSMC_MSG_SetFastPptLimit 0x2F
#define PPSMC_MSG_GetFastPptLimit 0x30
#define PPSMC_MSG_SetSoftMinGl2clk 0x31
#define PPSMC_MSG_SetSoftMaxGl2clk 0x32
#define PPSMC_MSG_SetSoftMinFclk 0x33
#define PPSMC_MSG_SetSoftMaxFclk 0x34
/* Must remain one past the highest message ID above. */
#define PPSMC_Message_Count 0x35
/* PSMC Reset Types for driver msg argument */
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x2
#define PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET 0x3
/* PLPD modes */
#define PPSMC_PLPD_MODE_DEFAULT 0x1
#define PPSMC_PLPD_MODE_OPTIMIZED 0x2
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_MSG;
#endif

View File

@@ -42,8 +42,10 @@
__SMU_DUMMY_MAP(SetPptLimit), \
__SMU_DUMMY_MAP(SetDriverDramAddrHigh), \
__SMU_DUMMY_MAP(SetDriverDramAddrLow), \
__SMU_DUMMY_MAP(SetDriverDramAddr), \
__SMU_DUMMY_MAP(SetToolsDramAddrHigh), \
__SMU_DUMMY_MAP(SetToolsDramAddrLow), \
__SMU_DUMMY_MAP(SetToolsDramAddr), \
__SMU_DUMMY_MAP(TransferTableSmu2Dram), \
__SMU_DUMMY_MAP(TransferTableDram2Smu), \
__SMU_DUMMY_MAP(UseDefaultPPTable), \
@@ -292,7 +294,12 @@
__SMU_DUMMY_MAP(AllowZstates), \
__SMU_DUMMY_MAP(GetSmartShiftStatus), \
__SMU_DUMMY_MAP(EnableLSdma), \
__SMU_DUMMY_MAP(DisableLSdma),
__SMU_DUMMY_MAP(DisableLSdma), \
__SMU_DUMMY_MAP(InitializeGfx), \
__SMU_DUMMY_MAP(SetSoftMaxFclk), \
__SMU_DUMMY_MAP(SetSoftMaxGl2clk), \
__SMU_DUMMY_MAP(SetSoftMinGl2clk), \
__SMU_DUMMY_MAP(GetSystemMetricsVersion),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -324,6 +331,7 @@ enum smu_clk_type {
SMU_OD_CCLK,
SMU_OD_SCLK,
SMU_OD_MCLK,
SMU_OD_FCLK,
SMU_OD_VDDC_CURVE,
SMU_OD_RANGE,
SMU_OD_VDDGFX_OFFSET,
@@ -334,6 +342,7 @@ enum smu_clk_type {
SMU_OD_FAN_MINIMUM_PWM,
SMU_OD_FAN_ZERO_RPM_ENABLE,
SMU_OD_FAN_ZERO_RPM_STOP_TEMP,
SMU_GL2CLK,
SMU_CLK_COUNT,
};
@@ -472,6 +481,14 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(GFX_DIDT_XVMIN), \
__SMU_DUMMY_MAP(FAN_ABNORMAL), \
__SMU_DUMMY_MAP(PIT), \
__SMU_DUMMY_MAP(DS_DMABECLK), \
__SMU_DUMMY_MAP(DS_MPIFOECLK), \
__SMU_DUMMY_MAP(DS_MPRASCLK), \
__SMU_DUMMY_MAP(DS_MPNHTCLK), \
__SMU_DUMMY_MAP(DS_FIOCLK), \
__SMU_DUMMY_MAP(DS_DXIOCLK), \
__SMU_DUMMY_MAP(DS_GL2CLK), \
__SMU_DUMMY_MAP(DPM_GL2CLK), \
__SMU_DUMMY_MAP(HROM_EN),
#undef __SMU_DUMMY_MAP

View File

@@ -41,7 +41,10 @@
#define smnMP1_FIRMWARE_FLAGS 0x3010024
#define smnMP1_PUB_CTRL 0x3010d10
#define MAX_DPM_LEVELS 16
#define SMU15_DRIVER_IF_VERSION_SMU_V15_0_8 0x007D0000
#define FEATURE_MASK(feature) (1ULL << feature)
#define MAX_PCIE_CONF 3
#define SMU15_TOOL_SIZE 0x19000
@@ -65,45 +68,28 @@ struct smu_15_0_max_sustainable_clocks {
uint32_t soc_clock;
};
struct smu_15_0_dpm_clk_level {
bool enabled;
uint32_t value;
};
struct smu_15_0_dpm_table {
uint32_t min; /* MHz */
uint32_t max; /* MHz */
uint32_t count;
bool is_fine_grained;
struct smu_15_0_dpm_clk_level dpm_levels[MAX_DPM_LEVELS];
};
struct smu_15_0_pcie_table {
uint8_t pcie_gen[MAX_PCIE_CONF];
uint8_t pcie_lane[MAX_PCIE_CONF];
uint16_t clk_freq[MAX_PCIE_CONF];
uint32_t num_of_link_levels;
};
struct smu_15_0_dpm_tables {
struct smu_15_0_dpm_table soc_table;
struct smu_15_0_dpm_table gfx_table;
struct smu_15_0_dpm_table uclk_table;
struct smu_15_0_dpm_table eclk_table;
struct smu_15_0_dpm_table vclk_table;
struct smu_15_0_dpm_table dclk_table;
struct smu_15_0_dpm_table dcef_table;
struct smu_15_0_dpm_table pixel_table;
struct smu_15_0_dpm_table display_table;
struct smu_15_0_dpm_table phy_table;
struct smu_15_0_dpm_table fclk_table;
struct smu_15_0_pcie_table pcie_table;
struct smu_dpm_table soc_table;
struct smu_dpm_table gfx_table;
struct smu_dpm_table uclk_table;
struct smu_dpm_table eclk_table;
struct smu_dpm_table vclk_table;
struct smu_dpm_table dclk_table;
struct smu_dpm_table dcef_table;
struct smu_dpm_table pixel_table;
struct smu_dpm_table display_table;
struct smu_dpm_table phy_table;
struct smu_dpm_table fclk_table;
struct smu_pcie_table pcie_table;
struct smu_dpm_table gl2_table;
};
struct smu_15_0_dpm_context {
struct smu_15_0_dpm_tables dpm_tables;
uint32_t workload_policy_mask;
uint32_t dcef_min_ds_clk;
uint64_t caps;
uint32_t board_volt;
};
enum smu_15_0_power_state {
@@ -118,6 +104,7 @@ struct smu_15_0_power_context {
uint32_t power_source;
uint8_t in_power_limit_boost_mode;
enum smu_15_0_power_state power_state;
atomic_t throttle_status;
};
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
@@ -197,7 +184,7 @@ int smu_v15_0_set_power_source(struct smu_context *smu,
int smu_v15_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
struct smu_15_0_dpm_table *single_dpm_table);
struct smu_dpm_table *single_dpm_table);
int smu_v15_0_gfx_ulv_control(struct smu_context *smu,
bool enablement);

View File

@@ -262,7 +262,6 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
"smu fw program = %d, version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
dev_info(smu->adev->dev, "SMU driver if version not matched\n");
}
return ret;

View File

@@ -101,7 +101,6 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
dev_info(smu->adev->dev, "SMU driver if version not matched\n");
}
return ret;

View File

@@ -2466,4 +2466,6 @@ void smu_v13_0_reset_custom_level(struct smu_context *smu)
pstate_table->uclk_pstate.custom.max = 0;
pstate_table->gfxclk_pstate.custom.min = 0;
pstate_table->gfxclk_pstate.custom.max = 0;
pstate_table->fclk_pstate.custom.min = 0;
pstate_table->fclk_pstate.custom.max = 0;
}

View File

@@ -59,6 +59,10 @@
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu,
int od_feature_bit,
int32_t *min, int32_t *max);
static const struct smu_feature_bits smu_v13_0_0_dpm_features = {
.bits = {
SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT),
@@ -1043,8 +1047,35 @@ static bool smu_v13_0_0_is_od_feature_supported(struct smu_context *smu,
PPTable_t *pptable = smu->smu_table.driver_pptable;
const OverDriveLimits_t * const overdrive_upperlimits =
&pptable->SkuTable.OverDriveLimitsBasicMax;
int32_t min_value, max_value;
bool feature_enabled;
return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
switch (od_feature_bit) {
case PP_OD_FEATURE_FAN_CURVE_BIT:
feature_enabled = !!(overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit));
if (feature_enabled) {
smu_v13_0_0_get_od_setting_limits(smu, PP_OD_FEATURE_FAN_CURVE_TEMP,
&min_value, &max_value);
if (!min_value && !max_value) {
feature_enabled = false;
goto out;
}
smu_v13_0_0_get_od_setting_limits(smu, PP_OD_FEATURE_FAN_CURVE_PWM,
&min_value, &max_value);
if (!min_value && !max_value) {
feature_enabled = false;
goto out;
}
}
break;
default:
feature_enabled = !!(overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit));
break;
}
out:
return feature_enabled;
}
static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu,

View File

@@ -461,6 +461,7 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
if ((pgm == 0 && fw_ver >= 0x00558200) ||
(pgm == 4 && fw_ver >= 0x04557100) ||
(pgm == 7 && fw_ver >= 0x07551400))
smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
}
@@ -1199,6 +1200,7 @@ static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
struct smu_dpm_table *fclk_table = &dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
pstate_table->gfxclk_pstate.min = SMU_DPM_TABLE_MIN(gfx_table);
@@ -1216,6 +1218,12 @@ static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
pstate_table->socclk_pstate.curr.min = SMU_DPM_TABLE_MIN(soc_table);
pstate_table->socclk_pstate.curr.max = SMU_DPM_TABLE_MAX(soc_table);
pstate_table->fclk_pstate.min = SMU_DPM_TABLE_MIN(fclk_table);
pstate_table->fclk_pstate.peak = SMU_DPM_TABLE_MAX(fclk_table);
pstate_table->fclk_pstate.curr.min = SMU_DPM_TABLE_MIN(fclk_table);
pstate_table->fclk_pstate.curr.max = SMU_DPM_TABLE_MAX(fclk_table);
pstate_table->fclk_pstate.standard = SMU_DPM_TABLE_MIN(fclk_table);
if (gfx_table->count > SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL &&
mem_table->count > SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL &&
soc_table->count > SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL) {
@@ -1394,14 +1402,22 @@ static int smu_v13_0_6_emit_clk_levels(struct smu_context *smu,
break;
case SMU_OD_MCLK:
if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SET_UCLK_MAX)))
return 0;
return -EOPNOTSUPP;
size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
pstate_table->uclk_pstate.curr.min,
pstate_table->uclk_pstate.curr.max);
break;
case SMU_OD_FCLK:
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT))
return -EOPNOTSUPP;
size += sysfs_emit_at(buf, size, "%s:\n", "OD_FCLK");
size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
pstate_table->fclk_pstate.curr.min,
pstate_table->fclk_pstate.curr.max);
break;
case SMU_SCLK:
case SMU_GFXCLK:
single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
@@ -2043,7 +2059,7 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
int ret = 0;
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK &&
clk_type != SMU_UCLK)
clk_type != SMU_UCLK && clk_type != SMU_FCLK)
return -EINVAL;
if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
@@ -2084,6 +2100,15 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
pstate_table->uclk_pstate.curr.max = max;
}
if (clk_type == SMU_FCLK) {
if (max == pstate_table->fclk_pstate.curr.max)
return 0;
ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_FCLK, 0, max, false);
if (!ret)
pstate_table->fclk_pstate.curr.max = max;
}
return ret;
}
@@ -2125,6 +2150,8 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
{
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
struct smu_dpm_table *uclk_table = &dpm_context->dpm_tables.uclk_table;
struct smu_dpm_table *fclk_table = &dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
uint32_t min_clk;
uint32_t max_clk;
@@ -2205,6 +2232,40 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
pstate_table->uclk_pstate.custom.max = input[1];
}
break;
case PP_OD_EDIT_FCLK_TABLE:
if (size != 2) {
dev_err(smu->adev->dev,
"Input parameter number not correct\n");
return -EINVAL;
}
if (!smu_cmn_feature_is_enabled(smu,
SMU_FEATURE_DPM_FCLK_BIT)) {
dev_warn(smu->adev->dev,
"FCLK limits setting not supported!\n");
return -EOPNOTSUPP;
}
max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.fclk_table);
if (input[0] == 0) {
dev_info(smu->adev->dev,
"Setting min FCLK level is not supported\n");
return -EOPNOTSUPP;
} else if (input[0] == 1) {
if (input[1] > max_clk) {
dev_warn(smu->adev->dev,
"Maximum FCLK (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
input[1], max_clk);
pstate_table->fclk_pstate.custom.max =
pstate_table->fclk_pstate.curr.max;
return -EINVAL;
}
pstate_table->fclk_pstate.custom.max = input[1];
} else {
return -EINVAL;
}
break;
case PP_OD_RESTORE_DEFAULT_TABLE:
if (size != 0) {
@@ -2224,14 +2285,27 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
if (ret)
return ret;
min_clk = SMU_DPM_TABLE_MIN(
&dpm_context->dpm_tables.uclk_table);
max_clk = SMU_DPM_TABLE_MAX(
&dpm_context->dpm_tables.uclk_table);
ret = smu_v13_0_6_set_soft_freq_limited_range(
smu, SMU_UCLK, min_clk, max_clk, false);
if (ret)
return ret;
if (SMU_DPM_TABLE_MAX(uclk_table) !=
pstate_table->uclk_pstate.curr.max) {
min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.uclk_table);
max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.uclk_table);
ret = smu_v13_0_6_set_soft_freq_limited_range(smu,
SMU_UCLK, min_clk,
max_clk, false);
if (ret)
return ret;
}
if (SMU_DPM_TABLE_MAX(fclk_table) !=
pstate_table->fclk_pstate.curr.max) {
max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.fclk_table);
min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.fclk_table);
ret = smu_v13_0_6_set_soft_freq_limited_range(smu,
SMU_FCLK, min_clk,
max_clk, false);
if (ret)
return ret;
}
smu_v13_0_reset_custom_level(smu);
}
break;
@@ -2258,6 +2332,16 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
if (ret)
return ret;
if (pstate_table->fclk_pstate.custom.max) {
min_clk = pstate_table->fclk_pstate.curr.min;
max_clk = pstate_table->fclk_pstate.custom.max;
ret = smu_v13_0_6_set_soft_freq_limited_range(smu,
SMU_FCLK, min_clk,
max_clk, false);
if (ret)
return ret;
}
if (!pstate_table->uclk_pstate.custom.max)
return 0;

View File

@@ -59,6 +59,10 @@
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu,
int od_feature_bit,
int32_t *min, int32_t *max);
static const struct smu_feature_bits smu_v13_0_7_dpm_features = {
.bits = {
SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT),
@@ -1053,8 +1057,35 @@ static bool smu_v13_0_7_is_od_feature_supported(struct smu_context *smu,
PPTable_t *pptable = smu->smu_table.driver_pptable;
const OverDriveLimits_t * const overdrive_upperlimits =
&pptable->SkuTable.OverDriveLimitsBasicMax;
int32_t min_value, max_value;
bool feature_enabled;
return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
switch (od_feature_bit) {
case PP_OD_FEATURE_FAN_CURVE_BIT:
feature_enabled = !!(overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit));
if (feature_enabled) {
smu_v13_0_7_get_od_setting_limits(smu, PP_OD_FEATURE_FAN_CURVE_TEMP,
&min_value, &max_value);
if (!min_value && !max_value) {
feature_enabled = false;
goto out;
}
smu_v13_0_7_get_od_setting_limits(smu, PP_OD_FEATURE_FAN_CURVE_PWM,
&min_value, &max_value);
if (!min_value && !max_value) {
feature_enabled = false;
goto out;
}
}
break;
default:
feature_enabled = !!(overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit));
break;
}
out:
return feature_enabled;
}
static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu,

View File

@@ -284,7 +284,6 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
dev_info(adev->dev, "SMU driver if version not matched\n");
}
return ret;

View File

@@ -56,6 +56,10 @@
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
int od_feature_bit,
int32_t *min, int32_t *max);
static const struct smu_feature_bits smu_v14_0_2_dpm_features = {
.bits = { SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT),
SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT),
@@ -922,8 +926,35 @@ static bool smu_v14_0_2_is_od_feature_supported(struct smu_context *smu,
PPTable_t *pptable = smu->smu_table.driver_pptable;
const OverDriveLimits_t * const overdrive_upperlimits =
&pptable->SkuTable.OverDriveLimitsBasicMax;
int32_t min_value, max_value;
bool feature_enabled;
return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
switch (od_feature_bit) {
case PP_OD_FEATURE_FAN_CURVE_BIT:
feature_enabled = !!(overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit));
if (feature_enabled) {
smu_v14_0_2_get_od_setting_limits(smu, PP_OD_FEATURE_FAN_CURVE_TEMP,
&min_value, &max_value);
if (!min_value && !max_value) {
feature_enabled = false;
goto out;
}
smu_v14_0_2_get_od_setting_limits(smu, PP_OD_FEATURE_FAN_CURVE_PWM,
&min_value, &max_value);
if (!min_value && !max_value) {
feature_enabled = false;
goto out;
}
}
break;
default:
feature_enabled = !!(overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit));
break;
}
out:
return feature_enabled;
}
static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,

View File

@@ -23,7 +23,7 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
SMU15_MGR = smu_v15_0.o smu_v15_0_0_ppt.o
SMU15_MGR = smu_v15_0.o smu_v15_0_0_ppt.o smu_v15_0_8_ppt.o
AMD_SWSMU_SMU15MGR = $(addprefix $(AMD_SWSMU_PATH)/smu15/,$(SMU15_MGR))

View File

@@ -589,71 +589,52 @@ int smu_v15_0_notify_memory_pool_location(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *memory_pool = &smu_table->memory_pool;
int ret = 0;
uint64_t address;
uint32_t address_low, address_high;
struct smu_msg_args args = {
.msg = SMU_MSG_DramLogSetDramAddr,
.num_args = 3,
.num_out_args = 0,
};
if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
return ret;
return 0;
address = memory_pool->mc_address;
address_high = (uint32_t)upper_32_bits(address);
address_low = (uint32_t)lower_32_bits(address);
/* SMU_MSG_DramLogSetDramAddr: ARG0=low, ARG1=high, ARG2=size */
args.args[0] = lower_32_bits(memory_pool->mc_address);
args.args[1] = upper_32_bits(memory_pool->mc_address);
args.args[2] = (u32)memory_pool->size;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
address_high, NULL);
if (ret)
return ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
address_low, NULL);
if (ret)
return ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
(uint32_t)memory_pool->size, NULL);
if (ret)
return ret;
return ret;
return smu->msg_ctl.ops->send_msg(&smu->msg_ctl, &args);
}
int smu_v15_0_set_driver_table_location(struct smu_context *smu)
{
struct smu_table *driver_table = &smu->smu_table.driver_table;
int ret = 0;
struct smu_msg_args args = {
.msg = SMU_MSG_SetDriverDramAddr,
.num_args = 2,
.num_out_args = 0,
};
if (driver_table->mc_address) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrHigh,
upper_32_bits(driver_table->mc_address),
NULL);
if (!ret)
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrLow,
lower_32_bits(driver_table->mc_address),
NULL);
}
args.args[0] = lower_32_bits(driver_table->mc_address);
args.args[1] = upper_32_bits(driver_table->mc_address);
return ret;
return smu->msg_ctl.ops->send_msg(&smu->msg_ctl, &args);
}
int smu_v15_0_set_tool_table_location(struct smu_context *smu)
{
int ret = 0;
struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
struct smu_msg_args args = {
.msg = SMU_MSG_SetToolsDramAddr,
.num_args = 2,
.num_out_args = 0,
};
if (tool_table->mc_address) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrHigh,
upper_32_bits(tool_table->mc_address),
NULL);
if (!ret)
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrLow,
lower_32_bits(tool_table->mc_address),
NULL);
}
/* SMU_MSG_SetToolsDramAddr: ARG0=low, ARG1=high */
args.args[0] = lower_32_bits(tool_table->mc_address);
args.args[1] = upper_32_bits(tool_table->mc_address);
return ret;
return smu->msg_ctl.ops->send_msg(&smu->msg_ctl, &args);
}
int smu_v15_0_set_allowed_mask(struct smu_context *smu)
@@ -700,8 +681,7 @@ int smu_v15_0_gfx_off_control(struct smu_context *smu, bool enable)
return ret;
}
int smu_v15_0_system_features_control(struct smu_context *smu,
bool en)
int smu_v15_0_system_features_control(struct smu_context *smu, bool en)
{
return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
SMU_MSG_DisableAllSmuFeatures), NULL);
@@ -905,7 +885,8 @@ static int smu_v15_0_wait_for_reset_complete(struct smu_context *smu,
return ret;
}
int smu_v15_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
int smu_v15_0_wait_for_event(struct smu_context *smu,
enum smu_event_type event,
uint64_t event_arg)
{
int ret = -EINVAL;
@@ -1077,18 +1058,12 @@ int smu_v15_0_set_performance_level(struct smu_context *smu,
{
struct smu_15_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
struct smu_15_0_dpm_table *gfx_table =
&dpm_context->dpm_tables.gfx_table;
struct smu_15_0_dpm_table *mem_table =
&dpm_context->dpm_tables.uclk_table;
struct smu_15_0_dpm_table *soc_table =
&dpm_context->dpm_tables.soc_table;
struct smu_15_0_dpm_table *vclk_table =
&dpm_context->dpm_tables.vclk_table;
struct smu_15_0_dpm_table *dclk_table =
&dpm_context->dpm_tables.dclk_table;
struct smu_15_0_dpm_table *fclk_table =
&dpm_context->dpm_tables.fclk_table;
struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
struct smu_dpm_table *vclk_table = &dpm_context->dpm_tables.vclk_table;
struct smu_dpm_table *dclk_table = &dpm_context->dpm_tables.dclk_table;
struct smu_dpm_table *fclk_table = &dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
struct amdgpu_device *adev = smu->adev;
@@ -1103,34 +1078,34 @@ int smu_v15_0_set_performance_level(struct smu_context *smu,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
sclk_min = sclk_max = gfx_table->max;
mclk_min = mclk_max = mem_table->max;
socclk_min = socclk_max = soc_table->max;
vclk_min = vclk_max = vclk_table->max;
dclk_min = dclk_max = dclk_table->max;
fclk_min = fclk_max = fclk_table->max;
sclk_min = sclk_max = SMU_DPM_TABLE_MAX(gfx_table);
mclk_min = mclk_max = SMU_DPM_TABLE_MAX(mem_table);
socclk_min = socclk_max = SMU_DPM_TABLE_MAX(soc_table);
vclk_min = vclk_max = SMU_DPM_TABLE_MAX(vclk_table);
dclk_min = dclk_max = SMU_DPM_TABLE_MAX(dclk_table);
fclk_min = fclk_max = SMU_DPM_TABLE_MAX(fclk_table);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
sclk_min = sclk_max = gfx_table->min;
mclk_min = mclk_max = mem_table->min;
socclk_min = socclk_max = soc_table->min;
vclk_min = vclk_max = vclk_table->min;
dclk_min = dclk_max = dclk_table->min;
fclk_min = fclk_max = fclk_table->min;
sclk_min = sclk_max = SMU_DPM_TABLE_MIN(gfx_table);
mclk_min = mclk_max = SMU_DPM_TABLE_MIN(mem_table);
socclk_min = socclk_max = SMU_DPM_TABLE_MIN(soc_table);
vclk_min = vclk_max = SMU_DPM_TABLE_MIN(vclk_table);
dclk_min = dclk_max = SMU_DPM_TABLE_MIN(dclk_table);
fclk_min = fclk_max = SMU_DPM_TABLE_MIN(fclk_table);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
sclk_min = gfx_table->min;
sclk_max = gfx_table->max;
mclk_min = mem_table->min;
mclk_max = mem_table->max;
socclk_min = soc_table->min;
socclk_max = soc_table->max;
vclk_min = vclk_table->min;
vclk_max = vclk_table->max;
dclk_min = dclk_table->min;
dclk_max = dclk_table->max;
fclk_min = fclk_table->min;
fclk_max = fclk_table->max;
sclk_min = SMU_DPM_TABLE_MIN(gfx_table);
sclk_max = SMU_DPM_TABLE_MAX(gfx_table);
mclk_min = SMU_DPM_TABLE_MIN(mem_table);
mclk_max = SMU_DPM_TABLE_MAX(mem_table);
socclk_min = SMU_DPM_TABLE_MIN(soc_table);
socclk_max = SMU_DPM_TABLE_MAX(soc_table);
vclk_min = SMU_DPM_TABLE_MIN(vclk_table);
vclk_max = SMU_DPM_TABLE_MAX(vclk_table);
dclk_min = SMU_DPM_TABLE_MIN(dclk_table);
dclk_max = SMU_DPM_TABLE_MAX(dclk_table);
fclk_min = SMU_DPM_TABLE_MIN(fclk_table);
fclk_max = SMU_DPM_TABLE_MAX(fclk_table);
auto_level = true;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
@@ -1352,10 +1327,11 @@ static int smu_v15_0_get_fine_grained_status(struct smu_context *smu,
int smu_v15_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
struct smu_15_0_dpm_table *single_dpm_table)
struct smu_dpm_table *single_dpm_table)
{
int ret = 0;
uint32_t clk;
bool is_fine_grained;
int i;
ret = smu_v15_0_get_dpm_level_count(smu,
@@ -1368,12 +1344,15 @@ int smu_v15_0_set_single_dpm_table(struct smu_context *smu,
ret = smu_v15_0_get_fine_grained_status(smu,
clk_type,
&single_dpm_table->is_fine_grained);
&is_fine_grained);
if (ret) {
dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
return ret;
}
if (is_fine_grained)
single_dpm_table->flags |= SMU_DPM_TABLE_FINE_GRAINED;
for (i = 0; i < single_dpm_table->count; i++) {
ret = smu_v15_0_get_dpm_freq_by_index(smu,
clk_type,
@@ -1386,11 +1365,6 @@ int smu_v15_0_set_single_dpm_table(struct smu_context *smu,
single_dpm_table->dpm_levels[i].value = clk;
single_dpm_table->dpm_levels[i].enabled = true;
if (i == 0)
single_dpm_table->min = clk;
else if (i == single_dpm_table->count - 1)
single_dpm_table->max = clk;
}
return 0;

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,313 @@
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __SMU_15_0_8_PPT_H__
#define __SMU_15_0_8_PPT_H__
/*
 * Per-ASIC topology/instance limits for SMU v15.0.8 parts
 * (NOTE(review): presumed maximum instance counts per socket - confirm
 * against the ASIC configuration).
 */
#define SMU_15_0_8_NUM_XGMI_LINKS 8
#define SMU_15_0_8_MAX_GFX_CLKS 8
#define SMU_15_0_8_MAX_CLKS 4
#define SMU_15_0_8_MAX_XCC 8
#define SMU_15_0_8_MAX_VCN 4
#define SMU_15_0_8_MAX_JPEG 40
#define SMU_15_0_8_MAX_AID 2
#define SMU_15_0_8_MAX_MID 2
#define SMU_15_0_8_MAX_HBM_STACKS 12
/* Install the SMU v15.0.8 ppt (powerplay table) callbacks on @smu. */
extern void smu_v15_0_8_set_ppt_funcs(struct smu_context *smu);
/*
 * Driver-side cache of static platform limits for SMU v15.0.8,
 * populated from the SMU's static metrics table. Field names mirror
 * StaticMetricsTable_t; note the serial-number fields here hold a
 * single value each rather than the per-die arrays of the firmware
 * table.
 */
typedef struct {
uint32_t MaxSocketPowerLimit;
/* NOTE(review): frequency fields are presumably in MHz - confirm against firmware spec */
uint32_t MaxGfxclkFrequency;
uint32_t MinGfxclkFrequency;
uint32_t MaxFclkFrequency;
uint32_t MinFclkFrequency;
uint32_t MaxGl2clkFrequency;
uint32_t MinGl2clkFrequency;
uint32_t UclkFrequencyTable[4];
uint32_t SocclkFrequency;
uint32_t LclkFrequency;
uint32_t VclkFrequency;
uint32_t DclkFrequency;
/* CTF (critical thermal fault) limits */
uint32_t CTFLimitMID;
uint32_t CTFLimitAID;
uint32_t CTFLimitXCD;
uint32_t CTFLimitHBM;
/* Thermal throttling limits */
uint32_t ThermalLimitMID;
uint32_t ThermalLimitAID;
uint32_t ThermalLimitXCD;
uint32_t ThermalLimitHBM;
uint64_t PublicSerialNumberMID;
uint64_t PublicSerialNumberAID;
uint64_t PublicSerialNumberXCD;
uint32_t PPT1Max;
uint32_t PPT1Min;
uint32_t PPT1Default;
bool init; /* true once this table has been populated */
} PPTable_t;
#if defined(SWSMU_CODE_LAYER_L2)
#include "smu_cmn.h"
/*
 * SMU v15.0.8 GPU metrics field list (X-macro).
 *
 * Each SMU_SCALAR()/SMU_ARRAY() entry describes one metric as
 * (attribute id, unit, type, member name[, array length]).  The
 * DECLARE_SMU_METRICS_CLASS() invocation below expands this list into the
 * smu_v15_0_8_gpu_metrics class -- presumably via machinery in smu_cmn.h.
 * Array lengths come from the SMU_15_0_8_MAX_* limits above.  Entry order
 * is significant to the generated layout; append rather than reorder.
 */
#define SMU_15_0_8_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \
SMU_SCALAR(SMU_MATTR(TEMPERATURE_HOTSPOT), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(U16), temperature_hotspot); \
SMU_SCALAR(SMU_MATTR(TEMPERATURE_MEM), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(U16), temperature_mem); \
SMU_SCALAR(SMU_MATTR(TEMPERATURE_VRSOC), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(U16), temperature_vrsoc); \
SMU_ARRAY(SMU_MATTR(TEMPERATURE_HBM), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(U16), temperature_hbm, \
SMU_15_0_8_MAX_HBM_STACKS); \
SMU_ARRAY(SMU_MATTR(TEMPERATURE_MID), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(U16), temperature_mid, SMU_15_0_8_MAX_MID); \
SMU_ARRAY(SMU_MATTR(TEMPERATURE_AID), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(U16), temperature_aid, SMU_15_0_8_MAX_AID); \
SMU_ARRAY(SMU_MATTR(TEMPERATURE_XCD), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(U16), temperature_xcd, SMU_15_0_8_MAX_XCC); \
SMU_SCALAR(SMU_MATTR(CURR_SOCKET_POWER), SMU_MUNIT(POWER_1), \
SMU_MTYPE(U16), curr_socket_power); \
SMU_SCALAR(SMU_MATTR(AVERAGE_GFX_ACTIVITY), SMU_MUNIT(PERCENT), \
SMU_MTYPE(U16), average_gfx_activity); \
SMU_SCALAR(SMU_MATTR(AVERAGE_UMC_ACTIVITY), SMU_MUNIT(PERCENT), \
SMU_MTYPE(U16), average_umc_activity); \
SMU_SCALAR(SMU_MATTR(MEM_MAX_BANDWIDTH), SMU_MUNIT(BW_1), \
SMU_MTYPE(U64), mem_max_bandwidth); \
SMU_SCALAR(SMU_MATTR(ENERGY_ACCUMULATOR), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), energy_accumulator); \
SMU_SCALAR(SMU_MATTR(SYSTEM_CLOCK_COUNTER), SMU_MUNIT(TIME_1), \
SMU_MTYPE(U64), system_clock_counter); \
SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), accumulation_counter); \
SMU_SCALAR(SMU_MATTR(PROCHOT_RESIDENCY_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), prochot_residency_acc); \
SMU_SCALAR(SMU_MATTR(PPT_RESIDENCY_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), ppt_residency_acc); \
SMU_SCALAR(SMU_MATTR(SOCKET_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), socket_thm_residency_acc); \
SMU_SCALAR(SMU_MATTR(VR_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), vr_thm_residency_acc); \
SMU_SCALAR(SMU_MATTR(HBM_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), hbm_thm_residency_acc); \
SMU_SCALAR(SMU_MATTR(GFXCLK_LOCK_STATUS), SMU_MUNIT(NONE), \
SMU_MTYPE(U32), gfxclk_lock_status); \
SMU_SCALAR(SMU_MATTR(PCIE_LINK_WIDTH), SMU_MUNIT(NONE), \
SMU_MTYPE(U16), pcie_link_width); \
SMU_SCALAR(SMU_MATTR(PCIE_LINK_SPEED), SMU_MUNIT(SPEED_2), \
SMU_MTYPE(U16), pcie_link_speed); \
SMU_SCALAR(SMU_MATTR(XGMI_LINK_WIDTH), SMU_MUNIT(NONE), \
SMU_MTYPE(U16), xgmi_link_width); \
SMU_SCALAR(SMU_MATTR(XGMI_LINK_SPEED), SMU_MUNIT(SPEED_1), \
SMU_MTYPE(U16), xgmi_link_speed); \
SMU_SCALAR(SMU_MATTR(GFX_ACTIVITY_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), gfx_activity_acc); \
SMU_SCALAR(SMU_MATTR(MEM_ACTIVITY_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), mem_activity_acc); \
SMU_ARRAY(SMU_MATTR(PCIE_BANDWIDTH_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), pcie_bandwidth_acc, SMU_15_0_8_MAX_MID); \
SMU_ARRAY(SMU_MATTR(PCIE_BANDWIDTH_INST), SMU_MUNIT(BW_1), \
SMU_MTYPE(U32), pcie_bandwidth_inst, SMU_15_0_8_MAX_MID); \
SMU_SCALAR(SMU_MATTR(PCIE_L0_TO_RECOV_COUNT_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), pcie_l0_to_recov_count_acc); \
SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_COUNT_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), pcie_replay_count_acc); \
SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_ROVER_COUNT_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), pcie_replay_rover_count_acc); \
SMU_SCALAR(SMU_MATTR(PCIE_NAK_SENT_COUNT_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), pcie_nak_sent_count_acc); \
SMU_SCALAR(SMU_MATTR(PCIE_NAK_RCVD_COUNT_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), pcie_nak_rcvd_count_acc); \
SMU_ARRAY(SMU_MATTR(XGMI_LINK_STATUS), SMU_MUNIT(NONE), \
SMU_MTYPE(U16), xgmi_link_status, \
SMU_15_0_8_NUM_XGMI_LINKS); \
SMU_SCALAR(SMU_MATTR(XGMI_READ_DATA_ACC), SMU_MUNIT(DATA_1), \
SMU_MTYPE(U64), xgmi_read_data_acc); \
SMU_SCALAR(SMU_MATTR(XGMI_WRITE_DATA_ACC), SMU_MUNIT(DATA_1), \
SMU_MTYPE(U64), xgmi_write_data_acc); \
SMU_SCALAR(SMU_MATTR(FIRMWARE_TIMESTAMP), SMU_MUNIT(TIME_2), \
SMU_MTYPE(U64), firmware_timestamp); \
SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1), \
SMU_MTYPE(U16), current_gfxclk, SMU_15_0_8_MAX_GFX_CLKS); \
SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1), \
SMU_MTYPE(U16), current_socclk, SMU_15_0_8_MAX_MID); \
SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1), \
SMU_MTYPE(U16), current_vclk0, SMU_15_0_8_MAX_VCN); \
SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1), \
SMU_MTYPE(U16), current_dclk0, SMU_15_0_8_MAX_VCN); \
SMU_ARRAY(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1), \
SMU_MTYPE(U16), current_uclk, SMU_15_0_8_MAX_AID); \
SMU_SCALAR(SMU_MATTR(PCIE_LC_PERF_OTHER_END_RECOVERY), \
SMU_MUNIT(NONE), SMU_MTYPE(U64), \
pcie_lc_perf_other_end_recovery); \
SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT), \
SMU_MTYPE(U32), gfx_busy_inst, SMU_15_0_8_MAX_XCC); \
SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
jpeg_busy, SMU_15_0_8_MAX_JPEG); \
SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
vcn_busy, SMU_15_0_8_MAX_VCN); \
SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(NONE), SMU_MTYPE(U64), \
gfx_busy_acc, SMU_15_0_8_MAX_XCC); \
SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc, \
SMU_15_0_8_MAX_XCC); \
SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), gfx_below_host_limit_thm_acc, \
SMU_15_0_8_MAX_XCC); \
SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), gfx_low_utilization_acc, \
SMU_15_0_8_MAX_XCC); \
SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \
SMU_15_0_8_MAX_XCC);
/* Expand the field list above into the smu_v15_0_8_gpu_metrics class. */
DECLARE_SMU_METRICS_CLASS(smu_v15_0_8_gpu_metrics, SMU_15_0_8_METRICS_FIELDS);
/*
 * Maximum temperature sensor entry counts for the board-level (system)
 * metrics tables below.
 */
#define SMU_15_0_8_MAX_SYSTEM_TEMP_ENTRIES 32 /* baseboard/system sensors */
#define SMU_15_0_8_MAX_NODE_TEMP_ENTRIES 12 /* per-node sensors */
#define SMU_15_0_8_MAX_VR_TEMP_ENTRIES 22 /* voltage-regulator sensors */
/*
 * SMU v15.0.8 GPU board temperature metrics field list (X-macro).
 *
 * Node and voltage-regulator (VR) rail temperatures for the GPU board,
 * expanded by DECLARE_SMU_METRICS_CLASS() below.  Values are signed 16-bit
 * temperatures (TEMP_1 unit); entry order is significant to the generated
 * layout, so append rather than reorder.
 */
#define SMU_15_0_8_GPUBOARD_TEMP_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \
SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), accumulation_counter); \
SMU_SCALAR(SMU_MATTR(LABEL_VERSION), SMU_MUNIT(NONE), \
SMU_MTYPE(U16), label_version); \
SMU_SCALAR(SMU_MATTR(NODE_ID), SMU_MUNIT(NONE), \
SMU_MTYPE(U16), node_id); \
SMU_SCALAR(SMU_MATTR(NODE_TEMP_RETIMER), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), node_temp_retimer); \
SMU_SCALAR(SMU_MATTR(NODE_TEMP_IBC), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), node_temp_ibc); \
SMU_SCALAR(SMU_MATTR(NODE_TEMP_IBC_2), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), node_temp_ibc_2); \
SMU_SCALAR(SMU_MATTR(NODE_TEMP_VDD18_VR), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), node_temp_vdd18_vr); \
SMU_SCALAR(SMU_MATTR(NODE_TEMP_04_HBM_B_VR), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), node_temp_04_hbm_b_vr); \
SMU_SCALAR(SMU_MATTR(NODE_TEMP_04_HBM_D_VR), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), node_temp_04_hbm_d_vr); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_SOCIO_A), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddcr_socio_a); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_SOCIO_C), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddcr_socio_c); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_X0), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddcr_x0); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_X1), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddcr_x1); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_HBM_B), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddio_hbm_b); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_HBM_D), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddio_hbm_d); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_04_HBM_B), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddio_04_hbm_b); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_04_HBM_D), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddio_04_hbm_d); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_HBM_B), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddcr_hbm_b); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_HBM_D), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddcr_hbm_d); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_075_HBM_B), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddcr_075_hbm_b); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_075_HBM_D), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddcr_075_hbm_d); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_11_GTA_A), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddio_11_gta_a); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_11_GTA_C), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddio_11_gta_c); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDAN_075_GTA_A), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddan_075_gta_a); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDAN_075_GTA_C), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddan_075_gta_c); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDCR_075_UCIE), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddcr_075_ucie); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_065_UCIEAA), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddio_065_ucieaa); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_065_UCIEAM_A), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddio_065_ucieam_a); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDIO_065_UCIEAM_C), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddio_065_ucieam_c); \
SMU_SCALAR(SMU_MATTR(VR_TEMP_VDDAN_075), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), vr_temp_vddan_075);
/* Expand the field list above into the smu_v15_0_8_gpuboard_temp_metrics class. */
DECLARE_SMU_METRICS_CLASS(smu_v15_0_8_gpuboard_temp_metrics,
SMU_15_0_8_GPUBOARD_TEMP_METRICS_FIELDS);
/*
 * SMU v15.0.8 baseboard temperature metrics field list (X-macro),
 * ID-based approach.
 *
 * Board/system-level sensors (UBB, OAM hot-swap controllers, retimer and
 * OAM voltage-regulator rails), expanded by DECLARE_SMU_METRICS_CLASS()
 * below.  Values are signed 16-bit temperatures (TEMP_1 unit); entry order
 * is significant to the generated layout, so append rather than reorder.
 */
#define SMU_15_0_8_BASEBOARD_TEMP_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \
SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), accumulation_counter); \
SMU_SCALAR(SMU_MATTR(LABEL_VERSION), SMU_MUNIT(NONE), \
SMU_MTYPE(U16), label_version); \
SMU_SCALAR(SMU_MATTR(NODE_ID), SMU_MUNIT(NONE), \
SMU_MTYPE(U16), node_id); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_FPGA), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_ubb_fpga); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_FRONT), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_ubb_front); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_BACK), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_ubb_back); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_OAM7), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_ubb_oam7); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_IBC), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_ubb_ibc); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_UFPGA), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_ubb_ufpga); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_OAM1), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_ubb_oam1); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_0_1_HSC), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_oam_0_1_hsc); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_2_3_HSC), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_oam_2_3_hsc); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_4_5_HSC), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_oam_4_5_hsc); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_6_7_HSC), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_oam_6_7_hsc); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_FPGA_0V72_VR), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_ubb_fpga_0v72_vr); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_UBB_FPGA_3V3_VR), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_ubb_fpga_3v3_vr); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_retimer_0_1_2_3_1v2_vr); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_retimer_4_5_6_7_1v2_vr); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_0_1_0V9_VR), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_retimer_0_1_0v9_vr); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_4_5_0V9_VR), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_retimer_4_5_0v9_vr); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_2_3_0V9_VR), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_retimer_2_3_0v9_vr); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_RETIMER_6_7_0V9_VR), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_retimer_6_7_0v9_vr); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_oam_0_1_2_3_3v3_vr); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_oam_4_5_6_7_3v3_vr); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_IBC_HSC), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_ibc_hsc); \
SMU_SCALAR(SMU_MATTR(SYSTEM_TEMP_IBC), SMU_MUNIT(TEMP_1), \
SMU_MTYPE(S16), system_temp_ibc);
/* Expand the field list above into the smu_v15_0_8_baseboard_temp_metrics class. */
DECLARE_SMU_METRICS_CLASS(smu_v15_0_8_baseboard_temp_metrics,
SMU_15_0_8_BASEBOARD_TEMP_METRICS_FIELDS);
#endif /* SWSMU_CODE_LAYER_L2 */
#endif /* __SMU_15_0_8_PPT_H__ */

Some files were not shown because too many files have changed in this diff Show More