mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-08 05:43:28 -04:00
Merge tag 'drm-next-2021-08-31-1' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
"Highlights:
- i915 has seen a lot of refactoring and uAPI cleanups due to a
change in the upstream direction going forward
This has all been audited with known userspace, but there may be
some pitfalls that were missed.
- i915 now uses common TTM to enable discrete memory on DG1/2 GPUs
- i915 enables Jasper and Elkhart Lake by default and has preliminary
XeHP/DG2 support
- amdgpu adds support for Cyan Skillfish
- lots of implicit fencing rules documented and fixed up in drivers
- msm now uses the core scheduler
- the irq midlayer has been removed for non-legacy drivers
- the sysfb code now works on more than x86.
Otherwise the usual smattering of stuff everywhere, panels, bridges,
refactorings.
Detailed summary:
core:
- extract i915 eDP backlight into core
- DP aux bus support
- drm_device.irq_enabled removed
- port drivers to native irq interfaces
- export gem shadow plane handling for vgem
- print proper driver name in framebuffer registration
- driver fixes for implicit fencing rules
- ARM fixed rate compression modifier added
- updated fb damage handling
- rmfb ioctl logging/docs
- drop drm_gem_object_put_locked
- define DRM_FORMAT_MAX_PLANES
- add gem fb vmap/vunmap helpers
- add lockdep_assert(once) helpers
- mark drm irq midlayer as legacy
- use offset adjusted bo mapping conversion
vgaarb:
- cleanups
fbdev:
- extend efifb handling to all arches
- div by 0 fixes for multiple drivers
udmabuf:
- add hugepage mapping support
dma-buf:
- non-dynamic exporter fixups
- document implicit fencing rules
amdgpu:
- Initial Cyan Skillfish support
- switch virtual DCE over to vkms based atomic
- VCN/JPEG power down fixes
- NAVI PCIE link handling fixes
- AMD HDMI freesync fixes
- Yellow Carp + Beige Goby fixes
- Clockgating/S0ix/SMU/EEPROM fixes
- embed hw fence in job
- rework dma-resv handling
- ensure eviction to system ram
amdkfd:
- uapi: SVM address range query added
- sysfs leak fix
- GPUVM TLB optimizations
- vmfault/migration counters
i915:
- Enable JSL and EHL by default
- preliminary XeHP/DG2 support
- remove all CNL support (never shipped)
- move to TTM for discrete memory support
- allow mixed object mmap handling
- GEM uAPI spring cleaning
- add I915_MMAP_OBJECT_FIXED
- reinstate ADL-P mmap ioctls
- drop a bunch of unused by userspace features
- disable and remove GPU relocations
- revert some i915 misfeatures
- major refactoring of GuC for Gen11+
- execbuffer object locking separate step
- reject caching/set-domain on discrete
- Enable pipe DMC loading on XE-LPD and ADL-P
- add PSF GV point support
- Refactor and fix DDI buffer translations
- Clean up FBC CFB allocation code
- Finish INTEL_GEN() and friends macro conversions
nouveau:
- add eDP backlight support
- implicit fence fix
msm:
- a680/7c3 support
- drm/scheduler conversion
panfrost:
- rework GPU reset
virtio:
- fix fencing for planes
ast:
- add detect support
bochs:
- move to tiny GPU driver
vc4:
- use hotplug irqs
- HDMI codec support
vmwgfx:
- use internal vmware device headers
ingenic:
- demidlayering irq
rcar-du:
- shutdown fixes
- convert to bridge connector helpers
zynqmp-dsub:
- misc fixes
mgag200:
- convert PLL handling to atomic
mediatek:
- MT8133 AAL support
- gem mmap object support
- MT8167 support
etnaviv:
- NXP Layerscape LS1028A SoC support
- GEM mmap cleanups
tegra:
- new user API
exynos:
- missing unlock fix
- build warning fix
- use refcount_t"
* tag 'drm-next-2021-08-31-1' of git://anongit.freedesktop.org/drm/drm: (1318 commits)
drm/amd/display: Move AllowDRAMSelfRefreshOrDRAMClockChangeInVblank to bounding box
drm/amd/display: Remove duplicate dml init
drm/amd/display: Update bounding box states (v2)
drm/amd/display: Update number of DCN3 clock states
drm/amdgpu: disable GFX CGCG in aldebaran
drm/amdgpu: Clear RAS interrupt status on aldebaran
drm/amdgpu: Add support for RAS XGMI err query
drm/amdkfd: Account for SH/SE count when setting up cu masks.
drm/amdgpu: rename amdgpu_bo_get_preferred_pin_domain
drm/amdgpu: drop redundant cancel_delayed_work_sync call
drm/amdgpu: add missing cleanups for more ASICs on UVD/VCE suspend
drm/amdgpu: add missing cleanups for Polaris12 UVD/VCE on suspend
drm/amdkfd: map SVM range with correct access permission
drm/amdkfd: check access permisson to restore retry fault
drm/amdgpu: Update RAS XGMI Error Query
drm/amdgpu: Add driver infrastructure for MCA RAS
drm/amd/display: Add Logging for HDMI color depth information
drm/amd/amdgpu: consolidate PSP TA init shared buf functions
drm/amd/amdgpu: add name field back to ras_common_if
drm/amdgpu: Fix build with missing pm_suspend_target_state module export
...
This commit is contained in:
@@ -35,6 +35,11 @@ config DRM_MIPI_DSI
|
||||
bool
|
||||
depends on DRM
|
||||
|
||||
config DRM_DP_AUX_BUS
|
||||
tristate
|
||||
depends on DRM
|
||||
depends on OF
|
||||
|
||||
config DRM_DP_AUX_CHARDEV
|
||||
bool "DRM DP AUX Interface"
|
||||
depends on DRM
|
||||
@@ -251,7 +256,6 @@ config DRM_AMDGPU
|
||||
select HWMON
|
||||
select BACKLIGHT_CLASS_DEVICE
|
||||
select INTERVAL_TREE
|
||||
select CHASH
|
||||
help
|
||||
Choose this option if you have a recent AMD Radeon graphics card.
|
||||
|
||||
@@ -317,8 +321,6 @@ source "drivers/gpu/drm/tilcdc/Kconfig"
|
||||
|
||||
source "drivers/gpu/drm/qxl/Kconfig"
|
||||
|
||||
source "drivers/gpu/drm/bochs/Kconfig"
|
||||
|
||||
source "drivers/gpu/drm/virtio/Kconfig"
|
||||
|
||||
source "drivers/gpu/drm/msm/Kconfig"
|
||||
|
||||
@@ -33,6 +33,8 @@ drm-$(CONFIG_PCI) += drm_pci.o
|
||||
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
|
||||
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
|
||||
|
||||
obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o
|
||||
|
||||
drm_vram_helper-y := drm_gem_vram_helper.o
|
||||
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
|
||||
|
||||
@@ -96,7 +98,6 @@ obj-y += omapdrm/
|
||||
obj-$(CONFIG_DRM_SUN4I) += sun4i/
|
||||
obj-y += tilcdc/
|
||||
obj-$(CONFIG_DRM_QXL) += qxl/
|
||||
obj-$(CONFIG_DRM_BOCHS) += bochs/
|
||||
obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio/
|
||||
obj-$(CONFIG_DRM_MSM) += msm/
|
||||
obj-$(CONFIG_DRM_TEGRA) += tegra/
|
||||
|
||||
@@ -57,7 +57,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
|
||||
amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
|
||||
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
|
||||
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
|
||||
amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o
|
||||
amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o \
|
||||
amdgpu_eeprom.o amdgpu_mca.o
|
||||
|
||||
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
|
||||
|
||||
@@ -75,7 +76,7 @@ amdgpu-y += \
|
||||
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
|
||||
arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
|
||||
nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o \
|
||||
beige_goby_reg_init.o yellow_carp_reg_init.o
|
||||
beige_goby_reg_init.o yellow_carp_reg_init.o cyan_skillfish_reg_init.o
|
||||
|
||||
# add DF block
|
||||
amdgpu-y += \
|
||||
@@ -111,6 +112,7 @@ amdgpu-y += \
|
||||
psp_v3_1.o \
|
||||
psp_v10_0.o \
|
||||
psp_v11_0.o \
|
||||
psp_v11_0_8.o \
|
||||
psp_v12_0.o \
|
||||
psp_v13_0.o
|
||||
|
||||
@@ -118,7 +120,7 @@ amdgpu-y += \
|
||||
amdgpu-y += \
|
||||
dce_v10_0.o \
|
||||
dce_v11_0.o \
|
||||
dce_virtual.o
|
||||
amdgpu_vkms.o
|
||||
|
||||
# add GFX block
|
||||
amdgpu-y += \
|
||||
@@ -187,6 +189,10 @@ amdgpu-y += \
|
||||
amdgpu-y += \
|
||||
amdgpu_reset.o
|
||||
|
||||
# add MCA block
|
||||
amdgpu-y += \
|
||||
mca_v3_0.o
|
||||
|
||||
# add amdkfd interfaces
|
||||
amdgpu-y += amdgpu_amdkfd.o
|
||||
|
||||
|
||||
@@ -108,6 +108,7 @@
|
||||
#include "amdgpu_df.h"
|
||||
#include "amdgpu_smuio.h"
|
||||
#include "amdgpu_fdinfo.h"
|
||||
#include "amdgpu_mca.h"
|
||||
|
||||
#define MAX_GPU_INSTANCE 16
|
||||
|
||||
@@ -916,6 +917,7 @@ struct amdgpu_device {
|
||||
|
||||
/* display */
|
||||
bool enable_virtual_display;
|
||||
struct amdgpu_vkms_output *amdgpu_vkms_output;
|
||||
struct amdgpu_mode_info mode_info;
|
||||
/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
|
||||
struct work_struct hotplug_work;
|
||||
@@ -1008,6 +1010,9 @@ struct amdgpu_device {
|
||||
/* df */
|
||||
struct amdgpu_df df;
|
||||
|
||||
/* MCA */
|
||||
struct amdgpu_mca mca;
|
||||
|
||||
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
|
||||
uint32_t harvest_ip_mask;
|
||||
int num_ip_blocks;
|
||||
@@ -1108,8 +1113,13 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev);
|
||||
|
||||
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
|
||||
|
||||
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
|
||||
void *buf, size_t size, bool write);
|
||||
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
|
||||
void *buf, size_t size, bool write);
|
||||
|
||||
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
|
||||
uint32_t *buf, size_t size, bool write);
|
||||
void *buf, size_t size, bool write);
|
||||
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
|
||||
uint32_t reg, uint32_t acc_flags);
|
||||
void amdgpu_device_wreg(struct amdgpu_device *adev,
|
||||
@@ -1265,6 +1275,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
|
||||
|
||||
#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
|
||||
|
||||
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
|
||||
|
||||
/* Common functions */
|
||||
bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
|
||||
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
|
||||
@@ -1385,12 +1397,12 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
|
||||
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
|
||||
|
||||
void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
|
||||
bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev);
|
||||
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
|
||||
void amdgpu_acpi_detect(void);
|
||||
#else
|
||||
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
|
||||
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
|
||||
static inline bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) { return false; }
|
||||
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
|
||||
static inline void amdgpu_acpi_detect(void) { }
|
||||
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
|
||||
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
|
||||
|
||||
@@ -160,17 +160,28 @@ static int acp_poweron(struct generic_pm_domain *genpd)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct device *get_mfd_cell_dev(const char *device_name, int r)
|
||||
static int acp_genpd_add_device(struct device *dev, void *data)
|
||||
{
|
||||
char auto_dev_name[25];
|
||||
struct device *dev;
|
||||
struct generic_pm_domain *gpd = data;
|
||||
int ret;
|
||||
|
||||
snprintf(auto_dev_name, sizeof(auto_dev_name),
|
||||
"%s.%d.auto", device_name, r);
|
||||
dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
|
||||
dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
|
||||
ret = pm_genpd_add_device(gpd, dev);
|
||||
if (ret)
|
||||
dev_err(dev, "Failed to add dev to genpd %d\n", ret);
|
||||
|
||||
return dev;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int acp_genpd_remove_device(struct device *dev, void *data)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = pm_genpd_remove_device(dev);
|
||||
if (ret)
|
||||
dev_err(dev, "Failed to remove dev from genpd %d\n", ret);
|
||||
|
||||
/* Continue to remove */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -181,11 +192,10 @@ static struct device *get_mfd_cell_dev(const char *device_name, int r)
|
||||
*/
|
||||
static int acp_hw_init(void *handle)
|
||||
{
|
||||
int r, i;
|
||||
int r;
|
||||
uint64_t acp_base;
|
||||
u32 val = 0;
|
||||
u32 count = 0;
|
||||
struct device *dev;
|
||||
struct i2s_platform_data *i2s_pdata = NULL;
|
||||
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
@@ -341,15 +351,10 @@ static int acp_hw_init(void *handle)
|
||||
if (r)
|
||||
goto failure;
|
||||
|
||||
for (i = 0; i < ACP_DEVS ; i++) {
|
||||
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
|
||||
r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
|
||||
if (r) {
|
||||
dev_err(dev, "Failed to add dev to genpd\n");
|
||||
goto failure;
|
||||
}
|
||||
}
|
||||
|
||||
r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
|
||||
acp_genpd_add_device);
|
||||
if (r)
|
||||
goto failure;
|
||||
|
||||
/* Assert Soft reset of ACP */
|
||||
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
|
||||
@@ -410,10 +415,8 @@ static int acp_hw_init(void *handle)
|
||||
*/
|
||||
static int acp_hw_fini(void *handle)
|
||||
{
|
||||
int i, ret;
|
||||
u32 val = 0;
|
||||
u32 count = 0;
|
||||
struct device *dev;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
/* return early if no ACP */
|
||||
@@ -458,13 +461,8 @@ static int acp_hw_fini(void *handle)
|
||||
udelay(100);
|
||||
}
|
||||
|
||||
for (i = 0; i < ACP_DEVS ; i++) {
|
||||
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
|
||||
ret = pm_genpd_remove_device(dev);
|
||||
/* If removal fails, dont giveup and try rest */
|
||||
if (ret)
|
||||
dev_err(dev, "remove dev from genpd failed\n");
|
||||
}
|
||||
device_for_each_child(adev->acp.parent, NULL,
|
||||
acp_genpd_remove_device);
|
||||
|
||||
mfd_remove_devices(adev->acp.parent);
|
||||
kfree(adev->acp.acp_res);
|
||||
|
||||
@@ -854,8 +854,8 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
|
||||
if (amdgpu_device_has_dc_support(adev)) {
|
||||
#if defined(CONFIG_DRM_AMD_DC)
|
||||
struct amdgpu_display_manager *dm = &adev->dm;
|
||||
if (dm->backlight_dev)
|
||||
atif->bd = dm->backlight_dev;
|
||||
if (dm->backlight_dev[0])
|
||||
atif->bd = dm->backlight_dev[0];
|
||||
#endif
|
||||
} else {
|
||||
struct drm_encoder *tmp;
|
||||
@@ -1032,13 +1032,13 @@ void amdgpu_acpi_detect(void)
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_acpi_is_s0ix_supported
|
||||
* amdgpu_acpi_is_s0ix_active
|
||||
*
|
||||
* @adev: amdgpu_device_pointer
|
||||
*
|
||||
* returns true if supported, false if not.
|
||||
*/
|
||||
bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
|
||||
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
|
||||
{
|
||||
#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
|
||||
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
*/
|
||||
|
||||
#include "amdgpu_amdkfd.h"
|
||||
#include "amd_pcie.h"
|
||||
#include "amd_shared.h"
|
||||
|
||||
#include "amdgpu.h"
|
||||
@@ -553,6 +554,88 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
|
||||
return (uint8_t)ret;
|
||||
}
|
||||
|
||||
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)dst, *peer_adev;
|
||||
int num_links;
|
||||
|
||||
if (adev->asic_type != CHIP_ALDEBARAN)
|
||||
return 0;
|
||||
|
||||
if (src)
|
||||
peer_adev = (struct amdgpu_device *)src;
|
||||
|
||||
/* num links returns 0 for indirect peers since indirect route is unknown. */
|
||||
num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
|
||||
if (num_links < 0) {
|
||||
DRM_ERROR("amdgpu: failed to get xgmi num links between node %d and %d. ret = %d\n",
|
||||
adev->gmc.xgmi.physical_node_id,
|
||||
peer_adev->gmc.xgmi.physical_node_id, num_links);
|
||||
num_links = 0;
|
||||
}
|
||||
|
||||
/* Aldebaran xGMI DPM is defeatured so assume x16 x 25Gbps for bandwidth. */
|
||||
return (num_links * 16 * 25000)/BITS_PER_BYTE;
|
||||
}
|
||||
|
||||
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)dev;
|
||||
int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
|
||||
fls(adev->pm.pcie_mlw_mask)) - 1;
|
||||
int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask &
|
||||
CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) :
|
||||
fls(adev->pm.pcie_gen_mask &
|
||||
CAIL_PCIE_LINK_SPEED_SUPPORT_MASK)) - 1;
|
||||
uint32_t num_lanes_mask = 1 << num_lanes_shift;
|
||||
uint32_t gen_speed_mask = 1 << gen_speed_shift;
|
||||
int num_lanes_factor = 0, gen_speed_mbits_factor = 0;
|
||||
|
||||
switch (num_lanes_mask) {
|
||||
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X1:
|
||||
num_lanes_factor = 1;
|
||||
break;
|
||||
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X2:
|
||||
num_lanes_factor = 2;
|
||||
break;
|
||||
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X4:
|
||||
num_lanes_factor = 4;
|
||||
break;
|
||||
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X8:
|
||||
num_lanes_factor = 8;
|
||||
break;
|
||||
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X12:
|
||||
num_lanes_factor = 12;
|
||||
break;
|
||||
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X16:
|
||||
num_lanes_factor = 16;
|
||||
break;
|
||||
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X32:
|
||||
num_lanes_factor = 32;
|
||||
break;
|
||||
}
|
||||
|
||||
switch (gen_speed_mask) {
|
||||
case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1:
|
||||
gen_speed_mbits_factor = 2500;
|
||||
break;
|
||||
case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2:
|
||||
gen_speed_mbits_factor = 5000;
|
||||
break;
|
||||
case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3:
|
||||
gen_speed_mbits_factor = 8000;
|
||||
break;
|
||||
case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4:
|
||||
gen_speed_mbits_factor = 16000;
|
||||
break;
|
||||
case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5:
|
||||
gen_speed_mbits_factor = 32000;
|
||||
break;
|
||||
}
|
||||
|
||||
return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE;
|
||||
}
|
||||
|
||||
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
|
||||
@@ -631,7 +714,6 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
|
||||
ret = dma_fence_wait(f, false);
|
||||
|
||||
err_ib_sched:
|
||||
dma_fence_put(f);
|
||||
amdgpu_job_free(job);
|
||||
err:
|
||||
return ret;
|
||||
|
||||
@@ -226,6 +226,8 @@ uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
|
||||
uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
|
||||
int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd);
|
||||
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
|
||||
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min);
|
||||
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min);
|
||||
|
||||
/* Read user wptr from a specified user address space with page fault
|
||||
* disabled. The memory must be pinned and mapped to the hardware when
|
||||
@@ -269,7 +271,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
|
||||
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
|
||||
uint64_t *size);
|
||||
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
|
||||
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
|
||||
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed);
|
||||
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
|
||||
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
|
||||
int amdgpu_amdkfd_gpuvm_sync_memory(
|
||||
@@ -330,7 +332,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd);
|
||||
int kgd2kfd_post_reset(struct kfd_dev *kfd);
|
||||
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
|
||||
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
|
||||
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask);
|
||||
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
|
||||
#else
|
||||
static inline int kgd2kfd_init(void)
|
||||
{
|
||||
@@ -389,7 +391,7 @@ void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
|
||||
}
|
||||
|
||||
static inline
|
||||
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
|
||||
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -44,4 +44,5 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
|
||||
.get_atc_vmid_pasid_mapping_info =
|
||||
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
|
||||
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
|
||||
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
|
||||
};
|
||||
|
||||
@@ -305,5 +305,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
|
||||
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
|
||||
.set_vm_context_page_table_base =
|
||||
kgd_gfx_v9_set_vm_context_page_table_base,
|
||||
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy
|
||||
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
|
||||
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
|
||||
};
|
||||
|
||||
@@ -560,6 +560,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
|
||||
case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
|
||||
type = RESET_WAVES;
|
||||
break;
|
||||
case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
|
||||
type = SAVE_WAVES;
|
||||
break;
|
||||
default:
|
||||
type = DRAIN_PIPE;
|
||||
break;
|
||||
@@ -754,6 +757,33 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
|
||||
adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
|
||||
}
|
||||
|
||||
static void program_trap_handler_settings(struct kgd_dev *kgd,
|
||||
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
|
||||
{
|
||||
struct amdgpu_device *adev = get_amdgpu_device(kgd);
|
||||
|
||||
lock_srbm(kgd, 0, 0, 0, vmid);
|
||||
|
||||
/*
|
||||
* Program TBA registers
|
||||
*/
|
||||
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
|
||||
lower_32_bits(tba_addr >> 8));
|
||||
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
|
||||
upper_32_bits(tba_addr >> 8) |
|
||||
(1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));
|
||||
|
||||
/*
|
||||
* Program TMA registers
|
||||
*/
|
||||
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
|
||||
lower_32_bits(tma_addr >> 8));
|
||||
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
|
||||
upper_32_bits(tma_addr >> 8));
|
||||
|
||||
unlock_srbm(kgd);
|
||||
}
|
||||
|
||||
const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
|
||||
.program_sh_mem_settings = kgd_program_sh_mem_settings,
|
||||
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
|
||||
@@ -774,4 +804,5 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
|
||||
.get_atc_vmid_pasid_mapping_info =
|
||||
get_atc_vmid_pasid_mapping_info,
|
||||
.set_vm_context_page_table_base = set_vm_context_page_table_base,
|
||||
.program_trap_handler_settings = program_trap_handler_settings,
|
||||
};
|
||||
|
||||
@@ -537,6 +537,9 @@ static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
|
||||
case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
|
||||
type = RESET_WAVES;
|
||||
break;
|
||||
case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
|
||||
type = SAVE_WAVES;
|
||||
break;
|
||||
default:
|
||||
type = DRAIN_PIPE;
|
||||
break;
|
||||
@@ -658,6 +661,33 @@ static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t v
|
||||
adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
|
||||
}
|
||||
|
||||
static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd,
|
||||
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
|
||||
{
|
||||
struct amdgpu_device *adev = get_amdgpu_device(kgd);
|
||||
|
||||
lock_srbm(kgd, 0, 0, 0, vmid);
|
||||
|
||||
/*
|
||||
* Program TBA registers
|
||||
*/
|
||||
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
|
||||
lower_32_bits(tba_addr >> 8));
|
||||
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
|
||||
upper_32_bits(tba_addr >> 8) |
|
||||
(1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));
|
||||
|
||||
/*
|
||||
* Program TMA registers
|
||||
*/
|
||||
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
|
||||
lower_32_bits(tma_addr >> 8));
|
||||
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
|
||||
upper_32_bits(tma_addr >> 8));
|
||||
|
||||
unlock_srbm(kgd);
|
||||
}
|
||||
|
||||
#if 0
|
||||
uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd,
|
||||
uint32_t trap_debug_wave_launch_mode,
|
||||
@@ -820,6 +850,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
|
||||
.address_watch_get_offset = address_watch_get_offset_v10_3,
|
||||
.get_atc_vmid_pasid_mapping_info = NULL,
|
||||
.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
|
||||
.program_trap_handler_settings = program_trap_handler_settings_v10_3,
|
||||
#if 0
|
||||
.enable_debug_trap = enable_debug_trap_v10_3,
|
||||
.disable_debug_trap = disable_debug_trap_v10_3,
|
||||
|
||||
@@ -42,7 +42,8 @@
|
||||
enum hqd_dequeue_request_type {
|
||||
NO_ACTION = 0,
|
||||
DRAIN_PIPE,
|
||||
RESET_WAVES
|
||||
RESET_WAVES,
|
||||
SAVE_WAVES
|
||||
};
|
||||
|
||||
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
|
||||
@@ -566,6 +567,9 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
|
||||
case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
|
||||
type = RESET_WAVES;
|
||||
break;
|
||||
case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
|
||||
type = SAVE_WAVES;
|
||||
break;
|
||||
default:
|
||||
type = DRAIN_PIPE;
|
||||
break;
|
||||
@@ -878,6 +882,32 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
|
||||
adev->gfx.cu_info.max_waves_per_simd;
|
||||
}
|
||||
|
||||
void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
|
||||
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
|
||||
{
|
||||
struct amdgpu_device *adev = get_amdgpu_device(kgd);
|
||||
|
||||
lock_srbm(kgd, 0, 0, 0, vmid);
|
||||
|
||||
/*
|
||||
* Program TBA registers
|
||||
*/
|
||||
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
|
||||
lower_32_bits(tba_addr >> 8));
|
||||
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
|
||||
upper_32_bits(tba_addr >> 8));
|
||||
|
||||
/*
|
||||
* Program TMA registers
|
||||
*/
|
||||
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
|
||||
lower_32_bits(tma_addr >> 8));
|
||||
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
|
||||
upper_32_bits(tma_addr >> 8));
|
||||
|
||||
unlock_srbm(kgd);
|
||||
}
|
||||
|
||||
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
|
||||
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
|
||||
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
|
||||
@@ -899,4 +929,5 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
|
||||
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
|
||||
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
|
||||
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
|
||||
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
|
||||
};
|
||||
|
||||
@@ -65,3 +65,5 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
|
||||
uint32_t vmid, uint64_t page_table_base);
|
||||
void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
|
||||
int *pasid_wave_cnt, int *max_waves_per_cu);
|
||||
void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
|
||||
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr);
|
||||
|
||||
@@ -1057,7 +1057,8 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
|
||||
|
||||
static int update_gpuvm_pte(struct kgd_mem *mem,
|
||||
struct kfd_mem_attachment *entry,
|
||||
struct amdgpu_sync *sync)
|
||||
struct amdgpu_sync *sync,
|
||||
bool *table_freed)
|
||||
{
|
||||
struct amdgpu_bo_va *bo_va = entry->bo_va;
|
||||
struct amdgpu_device *adev = entry->adev;
|
||||
@@ -1068,7 +1069,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
|
||||
return ret;
|
||||
|
||||
/* Update the page tables */
|
||||
ret = amdgpu_vm_bo_update(adev, bo_va, false);
|
||||
ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
|
||||
if (ret) {
|
||||
pr_err("amdgpu_vm_bo_update failed\n");
|
||||
return ret;
|
||||
@@ -1080,7 +1081,8 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
|
||||
static int map_bo_to_gpuvm(struct kgd_mem *mem,
|
||||
struct kfd_mem_attachment *entry,
|
||||
struct amdgpu_sync *sync,
|
||||
bool no_update_pte)
|
||||
bool no_update_pte,
|
||||
bool *table_freed)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@@ -1097,7 +1099,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
|
||||
if (no_update_pte)
|
||||
return 0;
|
||||
|
||||
ret = update_gpuvm_pte(mem, entry, sync);
|
||||
ret = update_gpuvm_pte(mem, entry, sync, table_freed);
|
||||
if (ret) {
|
||||
pr_err("update_gpuvm_pte() failed\n");
|
||||
goto update_gpuvm_pte_failed;
|
||||
@@ -1285,11 +1287,22 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
|
||||
if (avm->process_info)
|
||||
return -EINVAL;
|
||||
|
||||
/* Free the original amdgpu allocated pasid,
|
||||
* will be replaced with kfd allocated pasid.
|
||||
*/
|
||||
if (avm->pasid) {
|
||||
amdgpu_pasid_free(avm->pasid);
|
||||
amdgpu_vm_set_pasid(adev, avm, 0);
|
||||
}
|
||||
|
||||
/* Convert VM into a compute VM */
|
||||
ret = amdgpu_vm_make_compute(adev, avm, pasid);
|
||||
ret = amdgpu_vm_make_compute(adev, avm);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = amdgpu_vm_set_pasid(adev, avm, pasid);
|
||||
if (ret)
|
||||
return ret;
|
||||
/* Initialize KFD part of the VM and process info */
|
||||
ret = init_kfd_vm(avm, process_info, ef);
|
||||
if (ret)
|
||||
@@ -1594,7 +1607,8 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
|
||||
}
|
||||
|
||||
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
|
||||
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
|
||||
struct kgd_dev *kgd, struct kgd_mem *mem,
|
||||
void *drm_priv, bool *table_freed)
|
||||
{
|
||||
struct amdgpu_device *adev = get_amdgpu_device(kgd);
|
||||
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
|
||||
@@ -1682,7 +1696,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
|
||||
entry->va, entry->va + bo_size, entry);
|
||||
|
||||
ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
|
||||
is_invalid_userptr);
|
||||
is_invalid_userptr, table_freed);
|
||||
if (ret) {
|
||||
pr_err("Failed to map bo to gpuvm\n");
|
||||
goto out_unreserve;
|
||||
@@ -1706,6 +1720,12 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
|
||||
true);
|
||||
ret = unreserve_bo_and_vms(&ctx, false, false);
|
||||
|
||||
/* Only apply no TLB flush on Aldebaran to
|
||||
* workaround regressions on other Asics.
|
||||
*/
|
||||
if (table_freed && (adev->asic_type != CHIP_ALDEBARAN))
|
||||
*table_freed = true;
|
||||
|
||||
goto out;
|
||||
|
||||
out_unreserve:
|
||||
@@ -2132,7 +2152,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
|
||||
continue;
|
||||
|
||||
kfd_mem_dmaunmap_attachment(mem, attachment);
|
||||
ret = update_gpuvm_pte(mem, attachment, &sync);
|
||||
ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
|
||||
if (ret) {
|
||||
pr_err("%s: update PTE failed\n", __func__);
|
||||
/* make sure this gets validated again */
|
||||
@@ -2338,7 +2358,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
|
||||
continue;
|
||||
|
||||
kfd_mem_dmaunmap_attachment(mem, attachment);
|
||||
ret = update_gpuvm_pte(mem, attachment, &sync_obj);
|
||||
ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
|
||||
if (ret) {
|
||||
pr_debug("Memory eviction: update PTE failed. Try again\n");
|
||||
goto validate_map_fail;
|
||||
|
||||
@@ -34,6 +34,7 @@ struct amdgpu_fpriv;
|
||||
struct amdgpu_bo_list_entry {
|
||||
struct ttm_validate_buffer tv;
|
||||
struct amdgpu_bo_va *bo_va;
|
||||
struct dma_fence_chain *chain;
|
||||
uint32_t priority;
|
||||
struct page **user_pages;
|
||||
bool user_invalidated;
|
||||
|
||||
@@ -572,6 +572,20 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
|
||||
goto out;
|
||||
}
|
||||
|
||||
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
|
||||
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
|
||||
|
||||
e->bo_va = amdgpu_vm_bo_find(vm, bo);
|
||||
|
||||
if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) {
|
||||
e->chain = dma_fence_chain_alloc();
|
||||
if (!e->chain) {
|
||||
r = -ENOMEM;
|
||||
goto error_validate;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
|
||||
&p->bytes_moved_vis_threshold);
|
||||
p->bytes_moved = 0;
|
||||
@@ -599,15 +613,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
|
||||
gws = p->bo_list->gws_obj;
|
||||
oa = p->bo_list->oa_obj;
|
||||
|
||||
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
|
||||
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
|
||||
|
||||
/* Make sure we use the exclusive slot for shared BOs */
|
||||
if (bo->prime_shared_count)
|
||||
e->tv.num_shared = 0;
|
||||
e->bo_va = amdgpu_vm_bo_find(vm, bo);
|
||||
}
|
||||
|
||||
if (gds) {
|
||||
p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
|
||||
p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
|
||||
@@ -629,8 +634,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
|
||||
}
|
||||
|
||||
error_validate:
|
||||
if (r)
|
||||
if (r) {
|
||||
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
|
||||
dma_fence_chain_free(e->chain);
|
||||
e->chain = NULL;
|
||||
}
|
||||
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
|
||||
}
|
||||
out:
|
||||
return r;
|
||||
}
|
||||
@@ -670,9 +680,17 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
if (error && backoff)
|
||||
if (error && backoff) {
|
||||
struct amdgpu_bo_list_entry *e;
|
||||
|
||||
amdgpu_bo_list_for_each_entry(e, parser->bo_list) {
|
||||
dma_fence_chain_free(e->chain);
|
||||
e->chain = NULL;
|
||||
}
|
||||
|
||||
ttm_eu_backoff_reservation(&parser->ticket,
|
||||
&parser->validated);
|
||||
}
|
||||
|
||||
for (i = 0; i < parser->num_post_deps; i++) {
|
||||
drm_syncobj_put(parser->post_deps[i].syncobj);
|
||||
@@ -781,7 +799,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
|
||||
r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
@@ -792,7 +810,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
|
||||
if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
|
||||
bo_va = fpriv->csa_va;
|
||||
BUG_ON(!bo_va);
|
||||
r = amdgpu_vm_bo_update(adev, bo_va, false);
|
||||
r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
@@ -811,7 +829,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
|
||||
if (bo_va == NULL)
|
||||
continue;
|
||||
|
||||
r = amdgpu_vm_bo_update(adev, bo_va, false);
|
||||
r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
@@ -1109,7 +1127,7 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
|
||||
|
||||
dep->chain = NULL;
|
||||
if (syncobj_deps[i].point) {
|
||||
dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
|
||||
dep->chain = dma_fence_chain_alloc();
|
||||
if (!dep->chain)
|
||||
return -ENOMEM;
|
||||
}
|
||||
@@ -1117,7 +1135,7 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
|
||||
dep->syncobj = drm_syncobj_find(p->filp,
|
||||
syncobj_deps[i].handle);
|
||||
if (!dep->syncobj) {
|
||||
kfree(dep->chain);
|
||||
dma_fence_chain_free(dep->chain);
|
||||
return -EINVAL;
|
||||
}
|
||||
dep->point = syncobj_deps[i].point;
|
||||
@@ -1245,6 +1263,28 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
|
||||
|
||||
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
|
||||
|
||||
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
|
||||
struct dma_resv *resv = e->tv.bo->base.resv;
|
||||
struct dma_fence_chain *chain = e->chain;
|
||||
|
||||
if (!chain)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Work around dma_resv shortcommings by wrapping up the
|
||||
* submission in a dma_fence_chain and add it as exclusive
|
||||
* fence, but first add the submission as shared fence to make
|
||||
* sure that shared fences never signal before the exclusive
|
||||
* one.
|
||||
*/
|
||||
dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
|
||||
dma_fence_get(p->fence), 1);
|
||||
|
||||
dma_resv_add_shared_fence(resv, p->fence);
|
||||
rcu_assign_pointer(resv->fence_excl, &chain->base);
|
||||
e->chain = NULL;
|
||||
}
|
||||
|
||||
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
|
||||
mutex_unlock(&p->adev->notifier_lock);
|
||||
|
||||
|
||||
@@ -1414,7 +1414,7 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
|
||||
continue;
|
||||
}
|
||||
job = to_amdgpu_job(s_job);
|
||||
if (preempted && job->fence == fence)
|
||||
if (preempted && (&job->hw_fence) == fence)
|
||||
/* mark the job as preempted */
|
||||
job->preemption_status |= AMDGPU_IB_PREEMPTED;
|
||||
}
|
||||
|
||||
@@ -116,6 +116,7 @@ const char *amdgpu_asic_name[] = {
|
||||
"RENOIR",
|
||||
"ALDEBARAN",
|
||||
"NAVI10",
|
||||
"CYAN_SKILLFISH",
|
||||
"NAVI14",
|
||||
"NAVI12",
|
||||
"SIENNA_CICHLID",
|
||||
@@ -287,7 +288,7 @@ bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
|
||||
*/
|
||||
|
||||
/**
|
||||
* amdgpu_device_vram_access - read/write a buffer in vram
|
||||
* amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @pos: offset of the buffer in vram
|
||||
@@ -295,22 +296,65 @@ bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
|
||||
* @size: read/write size, sizeof(@buf) must > @size
|
||||
* @write: true - write to vram, otherwise - read from vram
|
||||
*/
|
||||
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
|
||||
uint32_t *buf, size_t size, bool write)
|
||||
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
|
||||
void *buf, size_t size, bool write)
|
||||
{
|
||||
unsigned long flags;
|
||||
uint32_t hi = ~0;
|
||||
uint32_t hi = ~0, tmp = 0;
|
||||
uint32_t *data = buf;
|
||||
uint64_t last;
|
||||
int idx;
|
||||
|
||||
if (!drm_dev_enter(&adev->ddev, &idx))
|
||||
return;
|
||||
|
||||
BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
|
||||
|
||||
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
|
||||
for (last = pos + size; pos < last; pos += 4) {
|
||||
tmp = pos >> 31;
|
||||
|
||||
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
|
||||
if (tmp != hi) {
|
||||
WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
|
||||
hi = tmp;
|
||||
}
|
||||
if (write)
|
||||
WREG32_NO_KIQ(mmMM_DATA, *data++);
|
||||
else
|
||||
*data++ = RREG32_NO_KIQ(mmMM_DATA);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
|
||||
drm_dev_exit(idx);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_device_vram_access - access vram by vram aperature
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @pos: offset of the buffer in vram
|
||||
* @buf: virtual address of the buffer in system memory
|
||||
* @size: read/write size, sizeof(@buf) must > @size
|
||||
* @write: true - write to vram, otherwise - read from vram
|
||||
*
|
||||
* The return value means how many bytes have been transferred.
|
||||
*/
|
||||
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
|
||||
void *buf, size_t size, bool write)
|
||||
{
|
||||
#ifdef CONFIG_64BIT
|
||||
void __iomem *addr;
|
||||
size_t count = 0;
|
||||
uint64_t last;
|
||||
|
||||
if (!adev->mman.aper_base_kaddr)
|
||||
return 0;
|
||||
|
||||
last = min(pos + size, adev->gmc.visible_vram_size);
|
||||
if (last > pos) {
|
||||
void __iomem *addr = adev->mman.aper_base_kaddr + pos;
|
||||
size_t count = last - pos;
|
||||
addr = adev->mman.aper_base_kaddr + pos;
|
||||
count = last - pos;
|
||||
|
||||
if (write) {
|
||||
memcpy_toio(addr, buf, count);
|
||||
@@ -322,35 +366,37 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
|
||||
memcpy_fromio(buf, addr, count);
|
||||
}
|
||||
|
||||
if (count == size)
|
||||
goto exit;
|
||||
}
|
||||
|
||||
return count;
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_device_vram_access - read/write a buffer in vram
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @pos: offset of the buffer in vram
|
||||
* @buf: virtual address of the buffer in system memory
|
||||
* @size: read/write size, sizeof(@buf) must > @size
|
||||
* @write: true - write to vram, otherwise - read from vram
|
||||
*/
|
||||
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
|
||||
void *buf, size_t size, bool write)
|
||||
{
|
||||
size_t count;
|
||||
|
||||
/* try to using vram apreature to access vram first */
|
||||
count = amdgpu_device_aper_access(adev, pos, buf, size, write);
|
||||
size -= count;
|
||||
if (size) {
|
||||
/* using MM to access rest vram */
|
||||
pos += count;
|
||||
buf += count / 4;
|
||||
size -= count;
|
||||
buf += count;
|
||||
amdgpu_device_mm_access(adev, pos, buf, size, write);
|
||||
}
|
||||
#endif
|
||||
|
||||
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
|
||||
for (last = pos + size; pos < last; pos += 4) {
|
||||
uint32_t tmp = pos >> 31;
|
||||
|
||||
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
|
||||
if (tmp != hi) {
|
||||
WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
|
||||
hi = tmp;
|
||||
}
|
||||
if (write)
|
||||
WREG32_NO_KIQ(mmMM_DATA, *buf++);
|
||||
else
|
||||
*buf++ = RREG32_NO_KIQ(mmMM_DATA);
|
||||
}
|
||||
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
exit:
|
||||
#endif
|
||||
drm_dev_exit(idx);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -518,7 +564,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
|
||||
adev->gfx.rlc.funcs &&
|
||||
adev->gfx.rlc.funcs->is_rlcg_access_range) {
|
||||
if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
|
||||
return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0, 0);
|
||||
return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
|
||||
} else {
|
||||
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
|
||||
}
|
||||
@@ -1266,15 +1312,16 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
|
||||
/**
|
||||
* amdgpu_device_vga_set_decode - enable/disable vga decode
|
||||
*
|
||||
* @cookie: amdgpu_device pointer
|
||||
* @pdev: PCI device pointer
|
||||
* @state: enable/disable vga decode
|
||||
*
|
||||
* Enable/disable vga decode (all asics).
|
||||
* Returns VGA resource flags.
|
||||
*/
|
||||
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
|
||||
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
|
||||
bool state)
|
||||
{
|
||||
struct amdgpu_device *adev = cookie;
|
||||
struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
|
||||
amdgpu_asic_set_vga_state(adev, state);
|
||||
if (state)
|
||||
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
|
||||
@@ -1394,6 +1441,10 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
|
||||
break;
|
||||
case CHIP_YELLOW_CARP:
|
||||
break;
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
if (adev->pdev->device == 0x13FE)
|
||||
adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -2100,6 +2151,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
|
||||
case CHIP_BEIGE_GOBY:
|
||||
case CHIP_VANGOGH:
|
||||
case CHIP_YELLOW_CARP:
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
if (adev->asic_type == CHIP_VANGOGH)
|
||||
adev->family = AMDGPU_FAMILY_VGH;
|
||||
else if (adev->asic_type == CHIP_YELLOW_CARP)
|
||||
@@ -3594,9 +3646,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||
|
||||
fence_driver_init:
|
||||
/* Fence driver */
|
||||
r = amdgpu_fence_driver_init(adev);
|
||||
r = amdgpu_fence_driver_sw_init(adev);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
|
||||
dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
|
||||
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
|
||||
goto failed;
|
||||
}
|
||||
@@ -3623,6 +3675,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||
goto release_ras_con;
|
||||
}
|
||||
|
||||
amdgpu_fence_driver_hw_init(adev);
|
||||
|
||||
dev_info(adev->dev,
|
||||
"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
|
||||
adev->gfx.config.max_shader_engines,
|
||||
@@ -3714,7 +3768,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||
/* this will fail for cards that aren't VGA class devices, just
|
||||
* ignore it */
|
||||
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
|
||||
vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
|
||||
vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
|
||||
|
||||
if (amdgpu_device_supports_px(ddev)) {
|
||||
px = true;
|
||||
@@ -3771,7 +3825,10 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
|
||||
{
|
||||
dev_info(adev->dev, "amdgpu: finishing device.\n");
|
||||
flush_delayed_work(&adev->delayed_init_work);
|
||||
ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
|
||||
if (adev->mman.initialized) {
|
||||
flush_delayed_work(&adev->mman.bdev.wq);
|
||||
ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
|
||||
}
|
||||
adev->shutdown = true;
|
||||
|
||||
/* make sure IB test finished before entering exclusive mode
|
||||
@@ -3790,7 +3847,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
|
||||
else
|
||||
drm_atomic_helper_shutdown(adev_to_drm(adev));
|
||||
}
|
||||
amdgpu_fence_driver_fini_hw(adev);
|
||||
amdgpu_fence_driver_hw_fini(adev);
|
||||
|
||||
if (adev->pm_sysfs_en)
|
||||
amdgpu_pm_sysfs_fini(adev);
|
||||
@@ -3812,7 +3869,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
|
||||
void amdgpu_device_fini_sw(struct amdgpu_device *adev)
|
||||
{
|
||||
amdgpu_device_ip_fini(adev);
|
||||
amdgpu_fence_driver_fini_sw(adev);
|
||||
amdgpu_fence_driver_sw_fini(adev);
|
||||
release_firmware(adev->firmware.gpu_info_fw);
|
||||
adev->firmware.gpu_info_fw = NULL;
|
||||
adev->accel_working = false;
|
||||
@@ -3833,7 +3890,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
|
||||
vga_switcheroo_fini_domain_pm_ops(adev->dev);
|
||||
}
|
||||
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
|
||||
vga_client_register(adev->pdev, NULL, NULL, NULL);
|
||||
vga_client_unregister(adev->pdev);
|
||||
|
||||
if (IS_ENABLED(CONFIG_PERF_EVENTS))
|
||||
amdgpu_pmu_fini(adev);
|
||||
@@ -3887,7 +3944,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
|
||||
/* evict vram memory */
|
||||
amdgpu_bo_evict_vram(adev);
|
||||
|
||||
amdgpu_fence_driver_suspend(adev);
|
||||
amdgpu_fence_driver_hw_fini(adev);
|
||||
|
||||
amdgpu_device_ip_suspend_phase2(adev);
|
||||
/* evict remaining vram memory
|
||||
@@ -3932,8 +3989,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
|
||||
dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
amdgpu_fence_driver_resume(adev);
|
||||
|
||||
amdgpu_fence_driver_hw_init(adev);
|
||||
|
||||
r = amdgpu_device_ip_late_init(adev);
|
||||
if (r)
|
||||
@@ -4394,7 +4450,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
|
||||
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
|
||||
struct amdgpu_reset_context *reset_context)
|
||||
{
|
||||
int i, r = 0;
|
||||
int i, j, r = 0;
|
||||
struct amdgpu_job *job = NULL;
|
||||
bool need_full_reset =
|
||||
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
|
||||
@@ -4418,11 +4474,22 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
|
||||
if (!ring || !ring->sched.thread)
|
||||
continue;
|
||||
|
||||
/*clear job fence from fence drv to avoid force_completion
|
||||
*leave NULL and vm flush fence in fence drv */
|
||||
for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
|
||||
struct dma_fence *old, **ptr;
|
||||
|
||||
ptr = &ring->fence_drv.fences[j];
|
||||
old = rcu_dereference_protected(*ptr, 1);
|
||||
if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
|
||||
RCU_INIT_POINTER(*ptr, NULL);
|
||||
}
|
||||
}
|
||||
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
}
|
||||
|
||||
if(job)
|
||||
if (job && job->vm)
|
||||
drm_sched_increase_karma(&job->base);
|
||||
|
||||
r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
|
||||
@@ -4886,7 +4953,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
|
||||
DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
|
||||
job ? job->base.id : -1, hive->hive_id);
|
||||
amdgpu_put_xgmi_hive(hive);
|
||||
if (job)
|
||||
if (job && job->vm)
|
||||
drm_sched_increase_karma(&job->base);
|
||||
return 0;
|
||||
}
|
||||
@@ -4910,7 +4977,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
|
||||
job ? job->base.id : -1);
|
||||
|
||||
/* even we skipped this reset, still need to set the job to guilty */
|
||||
if (job)
|
||||
if (job && job->vm)
|
||||
drm_sched_increase_karma(&job->base);
|
||||
goto skip_recovery;
|
||||
}
|
||||
@@ -5277,6 +5344,10 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
|
||||
adev->nbio.funcs->enable_doorbell_interrupt)
|
||||
adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
|
||||
|
||||
if (amdgpu_passthrough(adev) &&
|
||||
adev->nbio.funcs->clear_doorbell_interrupt)
|
||||
adev->nbio.funcs->clear_doorbell_interrupt(adev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -42,48 +42,6 @@
|
||||
#include <linux/pci-p2pdma.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
||||
static int
|
||||
__dma_resv_make_exclusive(struct dma_resv *obj)
|
||||
{
|
||||
struct dma_fence **fences;
|
||||
unsigned int count;
|
||||
int r;
|
||||
|
||||
if (!dma_resv_shared_list(obj)) /* no shared fences to convert */
|
||||
return 0;
|
||||
|
||||
r = dma_resv_get_fences(obj, NULL, &count, &fences);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (count == 0) {
|
||||
/* Now that was unexpected. */
|
||||
} else if (count == 1) {
|
||||
dma_resv_add_excl_fence(obj, fences[0]);
|
||||
dma_fence_put(fences[0]);
|
||||
kfree(fences);
|
||||
} else {
|
||||
struct dma_fence_array *array;
|
||||
|
||||
array = dma_fence_array_create(count, fences,
|
||||
dma_fence_context_alloc(1), 0,
|
||||
false);
|
||||
if (!array)
|
||||
goto err_fences_put;
|
||||
|
||||
dma_resv_add_excl_fence(obj, &array->base);
|
||||
dma_fence_put(&array->base);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_fences_put:
|
||||
while (count--)
|
||||
dma_fence_put(fences[count]);
|
||||
kfree(fences);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
|
||||
*
|
||||
@@ -110,24 +68,6 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
|
||||
if (r < 0)
|
||||
goto out;
|
||||
|
||||
r = amdgpu_bo_reserve(bo, false);
|
||||
if (unlikely(r != 0))
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* We only create shared fences for internal use, but importers
|
||||
* of the dmabuf rely on exclusive fences for implicitly
|
||||
* tracking write hazards. As any of the current fences may
|
||||
* correspond to a write, we need to convert all existing
|
||||
* fences on the reservation object into a single exclusive
|
||||
* fence.
|
||||
*/
|
||||
r = __dma_resv_make_exclusive(bo->tbo.base.resv);
|
||||
if (r)
|
||||
goto out;
|
||||
|
||||
bo->prime_shared_count++;
|
||||
amdgpu_bo_unreserve(bo);
|
||||
return 0;
|
||||
|
||||
out:
|
||||
@@ -150,9 +90,6 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
|
||||
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
|
||||
|
||||
if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
|
||||
bo->prime_shared_count--;
|
||||
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
}
|
||||
@@ -418,8 +355,6 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
|
||||
bo = gem_to_amdgpu_bo(gobj);
|
||||
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
|
||||
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
|
||||
if (dma_buf->ops != &amdgpu_dmabuf_ops)
|
||||
bo->prime_shared_count = 1;
|
||||
|
||||
dma_resv_unlock(resv);
|
||||
return gobj;
|
||||
|
||||
@@ -870,11 +870,10 @@ MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legac
|
||||
module_param_named(reset_method, amdgpu_reset_method, int, 0444);
|
||||
|
||||
/**
|
||||
* DOC: bad_page_threshold (int)
|
||||
* Bad page threshold is to specify the threshold value of faulty pages
|
||||
* detected by RAS ECC, that may result in GPU entering bad status if total
|
||||
* faulty pages by ECC exceed threshold value and leave it for user's further
|
||||
* check.
|
||||
* DOC: bad_page_threshold (int) Bad page threshold is specifies the
|
||||
* threshold value of faulty pages detected by RAS ECC, which may
|
||||
* result in the GPU entering bad status when the number of total
|
||||
* faulty pages by ECC exceeds the threshold value.
|
||||
*/
|
||||
MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = auto(default value), 0 = disable bad page retirement)");
|
||||
module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
|
||||
@@ -1213,6 +1212,9 @@ static const struct pci_device_id pciidlist[] = {
|
||||
{0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
|
||||
{0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
|
||||
|
||||
/* CYAN_SKILLFISH */
|
||||
{0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
|
||||
|
||||
/* BEIGE_GOBY */
|
||||
{0x1002, 0x7420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
|
||||
{0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
|
||||
@@ -1236,7 +1238,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
|
||||
int ret, retry = 0;
|
||||
bool supports_atomic = false;
|
||||
|
||||
if (!amdgpu_virtual_display &&
|
||||
if (amdgpu_virtual_display ||
|
||||
amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
|
||||
supports_atomic = true;
|
||||
|
||||
@@ -1292,7 +1294,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
|
||||
#endif
|
||||
|
||||
/* Get rid of things like offb */
|
||||
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb");
|
||||
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@@ -1474,7 +1476,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
|
||||
struct amdgpu_device *adev = drm_to_adev(drm_dev);
|
||||
int r;
|
||||
|
||||
if (amdgpu_acpi_is_s0ix_supported(adev))
|
||||
if (amdgpu_acpi_is_s0ix_active(adev))
|
||||
adev->in_s0ix = true;
|
||||
adev->in_s3 = true;
|
||||
r = amdgpu_device_suspend(drm_dev, true);
|
||||
@@ -1490,7 +1492,7 @@ static int amdgpu_pmops_resume(struct device *dev)
|
||||
int r;
|
||||
|
||||
r = amdgpu_device_resume(drm_dev, true);
|
||||
if (amdgpu_acpi_is_s0ix_supported(adev))
|
||||
if (amdgpu_acpi_is_s0ix_active(adev))
|
||||
adev->in_s0ix = false;
|
||||
return r;
|
||||
}
|
||||
@@ -1784,7 +1786,6 @@ static const struct drm_driver amdgpu_kms_driver = {
|
||||
.open = amdgpu_driver_open_kms,
|
||||
.postclose = amdgpu_driver_postclose_kms,
|
||||
.lastclose = amdgpu_driver_lastclose_kms,
|
||||
.irq_handler = amdgpu_irq_handler,
|
||||
.ioctls = amdgpu_ioctls_kms,
|
||||
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
|
||||
.dumb_create = amdgpu_mode_dumb_create,
|
||||
|
||||
239
drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
Normal file
239
drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
Normal file
@@ -0,0 +1,239 @@
|
||||
/*
|
||||
* Copyright 2021 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "amdgpu_eeprom.h"
|
||||
#include "amdgpu.h"
|
||||
|
||||
/* AT24CM02 and M24M02-R have a 256-byte write page size.
|
||||
*/
|
||||
#define EEPROM_PAGE_BITS 8
|
||||
#define EEPROM_PAGE_SIZE (1U << EEPROM_PAGE_BITS)
|
||||
#define EEPROM_PAGE_MASK (EEPROM_PAGE_SIZE - 1)
|
||||
|
||||
#define EEPROM_OFFSET_SIZE 2
|
||||
|
||||
/* EEPROM memory addresses are 19-bits long, which can
|
||||
* be partitioned into 3, 8, 8 bits, for a total of 19.
|
||||
* The upper 3 bits are sent as part of the 7-bit
|
||||
* "Device Type Identifier"--an I2C concept, which for EEPROM devices
|
||||
* is hard-coded as 1010b, indicating that it is an EEPROM
|
||||
* device--this is the wire format, followed by the upper
|
||||
* 3 bits of the 19-bit address, followed by the direction,
|
||||
* followed by two bytes holding the rest of the 16-bits of
|
||||
* the EEPROM memory address. The format on the wire for EEPROM
|
||||
* devices is: 1010XYZD, A15:A8, A7:A0,
|
||||
* Where D is the direction and sequenced out by the hardware.
|
||||
* Bits XYZ are memory address bits 18, 17 and 16.
|
||||
* These bits are compared to how pins 1-3 of the part are connected,
|
||||
* depending on the size of the part, more on that later.
|
||||
*
|
||||
* Note that of this wire format, a client is in control
|
||||
* of, and needs to specify only XYZ, A15:A8, A7:0, bits,
|
||||
* which is exactly the EEPROM memory address, or offset,
|
||||
* in order to address up to 8 EEPROM devices on the I2C bus.
|
||||
*
|
||||
* For instance, a 2-Mbit I2C EEPROM part, addresses all its bytes,
|
||||
* using an 18-bit address, bit 17 to 0 and thus would use all but one bit of
|
||||
* the 19 bits previously mentioned. The designer would then not connect
|
||||
* pins 1 and 2, and pin 3 usually named "A_2" or "E2", would be connected to
|
||||
* either Vcc or GND. This would allow for up to two 2-Mbit parts on
|
||||
* the same bus, where one would be addressable with bit 18 as 1, and
|
||||
* the other with bit 18 of the address as 0.
|
||||
*
|
||||
* For a 2-Mbit part, bit 18 is usually known as the "Chip Enable" or
|
||||
* "Hardware Address Bit". This bit is compared to the load on pin 3
|
||||
* of the device, described above, and if there is a match, then this
|
||||
* device responds to the command. This way, you can connect two
|
||||
* 2-Mbit EEPROM devices on the same bus, but see one contiguous
|
||||
* memory from 0 to 7FFFFh, where address 0 to 3FFFF is in the device
|
||||
* whose pin 3 is connected to GND, and address 40000 to 7FFFFh is in
|
||||
* the 2nd device, whose pin 3 is connected to Vcc.
|
||||
*
|
||||
* This addressing you encode in the 32-bit "eeprom_addr" below,
|
||||
* namely the 19-bits "XYZ,A15:A0", as a single 19-bit address. For
|
||||
* instance, eeprom_addr = 0x6DA01, is 110_1101_1010_0000_0001, where
|
||||
* XYZ=110b, and A15:A0=DA01h. The XYZ bits become part of the device
|
||||
* address, and the rest of the address bits are sent as the memory
|
||||
* address bytes.
|
||||
*
|
||||
* That is, for an I2C EEPROM driver everything is controlled by
|
||||
* the "eeprom_addr".
|
||||
*
|
||||
* P.S. If you need to write, lock and read the Identification Page,
|
||||
* (M24M02-DR device only, which we do not use), change the "7" to
|
||||
* "0xF" in the macro below, and let the client set bit 20 to 1 in
|
||||
* "eeprom_addr", and set A10 to 0 to write into it, and A10 and A1 to
|
||||
* 1 to lock it permanently.
|
||||
*/
|
||||
#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 7))
|
||||
|
||||
static int __amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
|
||||
u8 *eeprom_buf, u16 buf_size, bool read)
|
||||
{
|
||||
u8 eeprom_offset_buf[EEPROM_OFFSET_SIZE];
|
||||
struct i2c_msg msgs[] = {
|
||||
{
|
||||
.flags = 0,
|
||||
.len = EEPROM_OFFSET_SIZE,
|
||||
.buf = eeprom_offset_buf,
|
||||
},
|
||||
{
|
||||
.flags = read ? I2C_M_RD : 0,
|
||||
},
|
||||
};
|
||||
const u8 *p = eeprom_buf;
|
||||
int r;
|
||||
u16 len;
|
||||
|
||||
for (r = 0; buf_size > 0;
|
||||
buf_size -= len, eeprom_addr += len, eeprom_buf += len) {
|
||||
/* Set the EEPROM address we want to write to/read from.
|
||||
*/
|
||||
msgs[0].addr = MAKE_I2C_ADDR(eeprom_addr);
|
||||
msgs[1].addr = msgs[0].addr;
|
||||
msgs[0].buf[0] = (eeprom_addr >> 8) & 0xff;
|
||||
msgs[0].buf[1] = eeprom_addr & 0xff;
|
||||
|
||||
if (!read) {
|
||||
/* Write the maximum amount of data, without
|
||||
* crossing the device's page boundary, as per
|
||||
* its spec. Partial page writes are allowed,
|
||||
* starting at any location within the page,
|
||||
* so long as the page boundary isn't crossed
|
||||
* over (actually the page pointer rolls
|
||||
* over).
|
||||
*
|
||||
* As per the AT24CM02 EEPROM spec, after
|
||||
* writing into a page, the I2C driver should
|
||||
* terminate the transfer, i.e. in
|
||||
* "i2c_transfer()" below, with a STOP
|
||||
* condition, so that the self-timed write
|
||||
* cycle begins. This is implied for the
|
||||
* "i2c_transfer()" abstraction.
|
||||
*/
|
||||
len = min(EEPROM_PAGE_SIZE - (eeprom_addr &
|
||||
EEPROM_PAGE_MASK),
|
||||
(u32)buf_size);
|
||||
} else {
|
||||
/* Reading from the EEPROM has no limitation
|
||||
* on the number of bytes read from the EEPROM
|
||||
* device--they are simply sequenced out.
|
||||
*/
|
||||
len = buf_size;
|
||||
}
|
||||
msgs[1].len = len;
|
||||
msgs[1].buf = eeprom_buf;
|
||||
|
||||
/* This constitutes a START-STOP transaction.
|
||||
*/
|
||||
r = i2c_transfer(i2c_adap, msgs, ARRAY_SIZE(msgs));
|
||||
if (r != ARRAY_SIZE(msgs))
|
||||
break;
|
||||
|
||||
if (!read) {
|
||||
/* According to EEPROM specs the length of the
|
||||
* self-writing cycle, tWR (tW), is 10 ms.
|
||||
*
|
||||
* TODO: Use polling on ACK, aka Acknowledge
|
||||
* Polling, to minimize waiting for the
|
||||
* internal write cycle to complete, as it is
|
||||
* usually smaller than tWR (tW).
|
||||
*/
|
||||
msleep(10);
|
||||
}
|
||||
}
|
||||
|
||||
return r < 0 ? r : eeprom_buf - p;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_eeprom_xfer -- Read/write from/to an I2C EEPROM device
|
||||
* @i2c_adap: pointer to the I2C adapter to use
|
||||
* @eeprom_addr: EEPROM address from which to read/write
|
||||
* @eeprom_buf: pointer to data buffer to read into/write from
|
||||
* @buf_size: the size of @eeprom_buf
|
||||
* @read: True if reading from the EEPROM, false if writing
|
||||
*
|
||||
* Returns the number of bytes read/written; -errno on error.
|
||||
*/
|
||||
static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
|
||||
u8 *eeprom_buf, u16 buf_size, bool read)
|
||||
{
|
||||
const struct i2c_adapter_quirks *quirks = i2c_adap->quirks;
|
||||
u16 limit;
|
||||
|
||||
if (!quirks)
|
||||
limit = 0;
|
||||
else if (read)
|
||||
limit = quirks->max_read_len;
|
||||
else
|
||||
limit = quirks->max_write_len;
|
||||
|
||||
if (limit == 0) {
|
||||
return __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr,
|
||||
eeprom_buf, buf_size, read);
|
||||
} else if (limit <= EEPROM_OFFSET_SIZE) {
|
||||
dev_err_ratelimited(&i2c_adap->dev,
|
||||
"maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d",
|
||||
eeprom_addr, buf_size,
|
||||
read ? "read" : "write", EEPROM_OFFSET_SIZE);
|
||||
return -EINVAL;
|
||||
} else {
|
||||
u16 ps; /* Partial size */
|
||||
int res = 0, r;
|
||||
|
||||
/* The "limit" includes all data bytes sent/received,
|
||||
* which would include the EEPROM_OFFSET_SIZE bytes.
|
||||
* Account for them here.
|
||||
*/
|
||||
limit -= EEPROM_OFFSET_SIZE;
|
||||
for ( ; buf_size > 0;
|
||||
buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) {
|
||||
ps = min(limit, buf_size);
|
||||
|
||||
r = __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr,
|
||||
eeprom_buf, ps, read);
|
||||
if (r < 0)
|
||||
return r;
|
||||
res += r;
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
|
||||
u32 eeprom_addr, u8 *eeprom_buf,
|
||||
u16 bytes)
|
||||
{
|
||||
return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
|
||||
true);
|
||||
}
|
||||
|
||||
int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
|
||||
u32 eeprom_addr, u8 *eeprom_buf,
|
||||
u16 bytes)
|
||||
{
|
||||
return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
|
||||
false);
|
||||
}
|
||||
37
drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h
Normal file
37
drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h
Normal file
@@ -0,0 +1,37 @@
|
||||
/*
|
||||
* Copyright 2021 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _AMDGPU_EEPROM_H
|
||||
#define _AMDGPU_EEPROM_H
|
||||
|
||||
#include <linux/i2c.h>
|
||||
|
||||
int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
|
||||
u32 eeprom_addr, u8 *eeprom_buf,
|
||||
u16 bytes);
|
||||
|
||||
int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
|
||||
u32 eeprom_addr, u8 *eeprom_buf,
|
||||
u16 bytes);
|
||||
|
||||
#endif
|
||||
@@ -273,9 +273,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
|
||||
return 0;
|
||||
|
||||
out:
|
||||
if (abo) {
|
||||
|
||||
}
|
||||
if (fb && ret) {
|
||||
drm_gem_object_put(gobj);
|
||||
drm_framebuffer_unregister_private(fb);
|
||||
@@ -344,7 +341,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
|
||||
}
|
||||
|
||||
/* disable all the possible outputs/crtcs before entering KMS mode */
|
||||
if (!amdgpu_device_has_dc_support(adev))
|
||||
if (!amdgpu_device_has_dc_support(adev) && !amdgpu_virtual_display)
|
||||
drm_helper_disable_unused_functions(adev_to_drm(adev));
|
||||
|
||||
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
|
||||
|
||||
@@ -129,30 +129,50 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
|
||||
*
|
||||
* @ring: ring the fence is associated with
|
||||
* @f: resulting fence object
|
||||
* @job: job the fence is embedded in
|
||||
* @flags: flags to pass into the subordinate .emit_fence() call
|
||||
*
|
||||
* Emits a fence command on the requested ring (all asics).
|
||||
* Returns 0 on success, -ENOMEM on failure.
|
||||
*/
|
||||
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
|
||||
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
|
||||
unsigned flags)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
struct amdgpu_fence *fence;
|
||||
struct dma_fence *fence;
|
||||
struct amdgpu_fence *am_fence;
|
||||
struct dma_fence __rcu **ptr;
|
||||
uint32_t seq;
|
||||
int r;
|
||||
|
||||
fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
|
||||
if (fence == NULL)
|
||||
return -ENOMEM;
|
||||
if (job == NULL) {
|
||||
/* create a sperate hw fence */
|
||||
am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
|
||||
if (am_fence == NULL)
|
||||
return -ENOMEM;
|
||||
fence = &am_fence->base;
|
||||
am_fence->ring = ring;
|
||||
} else {
|
||||
/* take use of job-embedded fence */
|
||||
fence = &job->hw_fence;
|
||||
}
|
||||
|
||||
seq = ++ring->fence_drv.sync_seq;
|
||||
fence->ring = ring;
|
||||
dma_fence_init(&fence->base, &amdgpu_fence_ops,
|
||||
&ring->fence_drv.lock,
|
||||
adev->fence_context + ring->idx,
|
||||
seq);
|
||||
if (job != NULL && job->job_run_counter) {
|
||||
/* reinit seq for resubmitted jobs */
|
||||
fence->seqno = seq;
|
||||
} else {
|
||||
dma_fence_init(fence, &amdgpu_fence_ops,
|
||||
&ring->fence_drv.lock,
|
||||
adev->fence_context + ring->idx,
|
||||
seq);
|
||||
}
|
||||
|
||||
if (job != NULL) {
|
||||
/* mark this fence has a parent job */
|
||||
set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
|
||||
}
|
||||
|
||||
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
|
||||
seq, flags | AMDGPU_FENCE_FLAG_INT);
|
||||
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
|
||||
@@ -175,9 +195,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
|
||||
/* This function can't be called concurrently anyway, otherwise
|
||||
* emitting the fence would mess up the hardware ring buffer.
|
||||
*/
|
||||
rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
|
||||
rcu_assign_pointer(*ptr, dma_fence_get(fence));
|
||||
|
||||
*f = &fence->base;
|
||||
*f = fence;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -417,9 +437,6 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
|
||||
}
|
||||
amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
|
||||
|
||||
if (irq_src)
|
||||
amdgpu_irq_get(adev, irq_src, irq_type);
|
||||
|
||||
ring->fence_drv.irq_src = irq_src;
|
||||
ring->fence_drv.irq_type = irq_type;
|
||||
ring->fence_drv.initialized = true;
|
||||
@@ -490,7 +507,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
|
||||
|
||||
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
|
||||
num_hw_submission, amdgpu_job_hang_limit,
|
||||
timeout, sched_score, ring->name);
|
||||
timeout, NULL, sched_score, ring->name);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to create scheduler on ring %s.\n",
|
||||
ring->name);
|
||||
@@ -501,7 +518,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_fence_driver_init - init the fence driver
|
||||
* amdgpu_fence_driver_sw_init - init the fence driver
|
||||
* for all possible rings.
|
||||
*
|
||||
* @adev: amdgpu device pointer
|
||||
@@ -512,20 +529,20 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
|
||||
* amdgpu_fence_driver_start_ring().
|
||||
* Returns 0 for success.
|
||||
*/
|
||||
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
|
||||
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_fence_driver_fini - tear down the fence driver
|
||||
* amdgpu_fence_driver_hw_fini - tear down the fence driver
|
||||
* for all possible rings.
|
||||
*
|
||||
* @adev: amdgpu device pointer
|
||||
*
|
||||
* Tear down the fence driver for all possible rings (all asics).
|
||||
*/
|
||||
void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev)
|
||||
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, r;
|
||||
|
||||
@@ -534,8 +551,7 @@ void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev)
|
||||
|
||||
if (!ring || !ring->fence_drv.initialized)
|
||||
continue;
|
||||
if (!ring->no_scheduler)
|
||||
drm_sched_fini(&ring->sched);
|
||||
|
||||
/* You can't wait for HW to signal if it's gone */
|
||||
if (!drm_dev_is_unplugged(&adev->ddev))
|
||||
r = amdgpu_fence_wait_empty(ring);
|
||||
@@ -553,7 +569,7 @@ void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev)
|
||||
}
|
||||
}
|
||||
|
||||
void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev)
|
||||
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
unsigned int i, j;
|
||||
|
||||
@@ -563,6 +579,9 @@ void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev)
|
||||
if (!ring || !ring->fence_drv.initialized)
|
||||
continue;
|
||||
|
||||
if (!ring->no_scheduler)
|
||||
drm_sched_fini(&ring->sched);
|
||||
|
||||
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
|
||||
dma_fence_put(ring->fence_drv.fences[j]);
|
||||
kfree(ring->fence_drv.fences);
|
||||
@@ -572,49 +591,18 @@ void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev)
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_fence_driver_suspend - suspend the fence driver
|
||||
* amdgpu_fence_driver_hw_init - enable the fence driver
|
||||
* for all possible rings.
|
||||
*
|
||||
* @adev: amdgpu device pointer
|
||||
*
|
||||
* Suspend the fence driver for all possible rings (all asics).
|
||||
*/
|
||||
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, r;
|
||||
|
||||
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
|
||||
struct amdgpu_ring *ring = adev->rings[i];
|
||||
if (!ring || !ring->fence_drv.initialized)
|
||||
continue;
|
||||
|
||||
/* wait for gpu to finish processing current batch */
|
||||
r = amdgpu_fence_wait_empty(ring);
|
||||
if (r) {
|
||||
/* delay GPU reset to resume */
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
}
|
||||
|
||||
/* disable the interrupt */
|
||||
if (ring->fence_drv.irq_src)
|
||||
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
|
||||
ring->fence_drv.irq_type);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_fence_driver_resume - resume the fence driver
|
||||
* for all possible rings.
|
||||
*
|
||||
* @adev: amdgpu device pointer
|
||||
*
|
||||
* Resume the fence driver for all possible rings (all asics).
|
||||
* Enable the fence driver for all possible rings (all asics).
|
||||
* Not all asics have all rings, so each asic will only
|
||||
* start the fence driver on the rings it has using
|
||||
* amdgpu_fence_driver_start_ring().
|
||||
* Returns 0 for success.
|
||||
*/
|
||||
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
|
||||
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
|
||||
{
|
||||
int i;
|
||||
|
||||
@@ -653,8 +641,16 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
|
||||
|
||||
static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
|
||||
{
|
||||
struct amdgpu_fence *fence = to_amdgpu_fence(f);
|
||||
return (const char *)fence->ring->name;
|
||||
struct amdgpu_ring *ring;
|
||||
|
||||
if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
|
||||
struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
|
||||
|
||||
ring = to_amdgpu_ring(job->base.sched);
|
||||
} else {
|
||||
ring = to_amdgpu_fence(f)->ring;
|
||||
}
|
||||
return (const char *)ring->name;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -667,13 +663,20 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
|
||||
*/
|
||||
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
|
||||
{
|
||||
struct amdgpu_fence *fence = to_amdgpu_fence(f);
|
||||
struct amdgpu_ring *ring = fence->ring;
|
||||
struct amdgpu_ring *ring;
|
||||
|
||||
if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
|
||||
struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
|
||||
|
||||
ring = to_amdgpu_ring(job->base.sched);
|
||||
} else {
|
||||
ring = to_amdgpu_fence(f)->ring;
|
||||
}
|
||||
|
||||
if (!timer_pending(&ring->fence_drv.fallback_timer))
|
||||
amdgpu_fence_schedule_fallback(ring);
|
||||
|
||||
DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
|
||||
DMA_FENCE_TRACE(f, "armed on ring %i!\n", ring->idx);
|
||||
|
||||
return true;
|
||||
}
|
||||
@@ -688,8 +691,20 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
|
||||
static void amdgpu_fence_free(struct rcu_head *rcu)
|
||||
{
|
||||
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
|
||||
struct amdgpu_fence *fence = to_amdgpu_fence(f);
|
||||
kmem_cache_free(amdgpu_fence_slab, fence);
|
||||
|
||||
if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
|
||||
/* free job if fence has a parent job */
|
||||
struct amdgpu_job *job;
|
||||
|
||||
job = container_of(f, struct amdgpu_job, hw_fence);
|
||||
kfree(job);
|
||||
} else {
|
||||
/* free fence_slab if it's separated fence*/
|
||||
struct amdgpu_fence *fence;
|
||||
|
||||
fence = to_amdgpu_fence(f);
|
||||
kmem_cache_free(amdgpu_fence_slab, fence);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -712,6 +727,7 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
|
||||
.release = amdgpu_fence_release,
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* Fence debugfs
|
||||
*/
|
||||
|
||||
@@ -27,10 +27,10 @@
|
||||
#include "smu_v11_0_i2c.h"
|
||||
#include "atom.h"
|
||||
#include "amdgpu_fru_eeprom.h"
|
||||
#include "amdgpu_eeprom.h"
|
||||
|
||||
#define I2C_PRODUCT_INFO_ADDR 0xAC
|
||||
#define I2C_PRODUCT_INFO_ADDR_SIZE 0x2
|
||||
#define I2C_PRODUCT_INFO_OFFSET 0xC0
|
||||
#define FRU_EEPROM_MADDR 0x60000
|
||||
#define I2C_PRODUCT_INFO_OFFSET 0xC0
|
||||
|
||||
static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
|
||||
{
|
||||
@@ -62,19 +62,11 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
|
||||
}
|
||||
|
||||
static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
|
||||
unsigned char *buff)
|
||||
unsigned char *buff)
|
||||
{
|
||||
int ret, size;
|
||||
struct i2c_msg msg = {
|
||||
.addr = I2C_PRODUCT_INFO_ADDR,
|
||||
.flags = I2C_M_RD,
|
||||
.buf = buff,
|
||||
};
|
||||
buff[0] = 0;
|
||||
buff[1] = addrptr;
|
||||
msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + 1;
|
||||
ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
|
||||
|
||||
ret = amdgpu_eeprom_read(&adev->pm.smu_i2c, addrptr, buff, 1);
|
||||
if (ret < 1) {
|
||||
DRM_WARN("FRU: Failed to get size field");
|
||||
return ret;
|
||||
@@ -83,13 +75,9 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
|
||||
/* The size returned by the i2c requires subtraction of 0xC0 since the
|
||||
* size apparently always reports as 0xC0+actual size.
|
||||
*/
|
||||
size = buff[2] - I2C_PRODUCT_INFO_OFFSET;
|
||||
/* Add 1 since address field was 1 byte */
|
||||
buff[1] = addrptr + 1;
|
||||
|
||||
msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + size;
|
||||
ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
|
||||
size = buff[0] - I2C_PRODUCT_INFO_OFFSET;
|
||||
|
||||
ret = amdgpu_eeprom_read(&adev->pm.smu_i2c, addrptr + 1, buff, size);
|
||||
if (ret < 1) {
|
||||
DRM_WARN("FRU: Failed to get data field");
|
||||
return ret;
|
||||
@@ -101,8 +89,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
|
||||
int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
|
||||
{
|
||||
unsigned char buff[34];
|
||||
int addrptr, size;
|
||||
int len;
|
||||
u32 addrptr;
|
||||
int size, len;
|
||||
|
||||
if (!is_fru_eeprom_supported(adev))
|
||||
return 0;
|
||||
@@ -125,7 +113,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
|
||||
* Bytes 8-a are all 1-byte and refer to the size of the entire struct,
|
||||
* and the language field, so just start from 0xb, manufacturer size
|
||||
*/
|
||||
addrptr = 0xb;
|
||||
addrptr = FRU_EEPROM_MADDR + 0xb;
|
||||
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
|
||||
if (size < 1) {
|
||||
DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
|
||||
|
||||
@@ -76,7 +76,7 @@ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
|
||||
if (adev->dummy_page_addr)
|
||||
return 0;
|
||||
adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, dummy_page, 0,
|
||||
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||
PAGE_SIZE, DMA_BIDIRECTIONAL);
|
||||
if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) {
|
||||
dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
|
||||
adev->dummy_page_addr = 0;
|
||||
@@ -96,8 +96,8 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
if (!adev->dummy_page_addr)
|
||||
return;
|
||||
pci_unmap_page(adev->pdev, adev->dummy_page_addr,
|
||||
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||
dma_unmap_page(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE,
|
||||
DMA_BIDIRECTIONAL);
|
||||
adev->dummy_page_addr = 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -621,7 +621,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
|
||||
|
||||
if (operation == AMDGPU_VA_OP_MAP ||
|
||||
operation == AMDGPU_VA_OP_REPLACE) {
|
||||
r = amdgpu_vm_bo_update(adev, bo_va, false);
|
||||
r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
|
||||
if (r)
|
||||
goto error;
|
||||
}
|
||||
@@ -838,7 +838,8 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
|
||||
break;
|
||||
}
|
||||
case AMDGPU_GEM_OP_SET_PLACEMENT:
|
||||
if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
|
||||
if (robj->tbo.base.import_attach &&
|
||||
args->value & AMDGPU_GEM_DOMAIN_VRAM) {
|
||||
r = -EINVAL;
|
||||
amdgpu_bo_unreserve(robj);
|
||||
break;
|
||||
@@ -903,7 +904,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
|
||||
DIV_ROUND_UP(args->bpp, 8), 0);
|
||||
args->size = (u64)args->pitch * args->height;
|
||||
args->size = ALIGN(args->size, PAGE_SIZE);
|
||||
domain = amdgpu_bo_get_preferred_pin_domain(adev,
|
||||
domain = amdgpu_bo_get_preferred_domain(adev,
|
||||
amdgpu_display_supported_domains(adev, flags));
|
||||
r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
|
||||
ttm_bo_type_device, NULL, &gobj);
|
||||
|
||||
@@ -629,7 +629,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
|
||||
adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
|
||||
adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
|
||||
adev->gfx.ras_if->sub_block_index = 0;
|
||||
strcpy(adev->gfx.ras_if->name, "gfx");
|
||||
}
|
||||
fs_info.head = ih_info.head = *adev->gfx.ras_if;
|
||||
r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
|
||||
|
||||
@@ -471,6 +471,27 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
|
||||
return r;
|
||||
}
|
||||
|
||||
if (adev->mca.mp0.ras_funcs &&
|
||||
adev->mca.mp0.ras_funcs->ras_late_init) {
|
||||
r = adev->mca.mp0.ras_funcs->ras_late_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
if (adev->mca.mp1.ras_funcs &&
|
||||
adev->mca.mp1.ras_funcs->ras_late_init) {
|
||||
r = adev->mca.mp1.ras_funcs->ras_late_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
if (adev->mca.mpio.ras_funcs &&
|
||||
adev->mca.mpio.ras_funcs->ras_late_init) {
|
||||
r = adev->mca.mpio.ras_funcs->ras_late_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -132,14 +132,11 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
|
||||
struct amdgpu_gtt_node *node;
|
||||
int r;
|
||||
|
||||
spin_lock(&mgr->lock);
|
||||
if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT &&
|
||||
atomic64_read(&mgr->available) < num_pages) {
|
||||
spin_unlock(&mgr->lock);
|
||||
if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
|
||||
atomic64_add_return(num_pages, &mgr->used) > man->size) {
|
||||
atomic64_sub(num_pages, &mgr->used);
|
||||
return -ENOSPC;
|
||||
}
|
||||
atomic64_sub(num_pages, &mgr->available);
|
||||
spin_unlock(&mgr->lock);
|
||||
|
||||
node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
|
||||
if (!node) {
|
||||
@@ -175,7 +172,8 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
|
||||
kfree(node);
|
||||
|
||||
err_out:
|
||||
atomic64_add(num_pages, &mgr->available);
|
||||
if (!(place->flags & TTM_PL_FLAG_TEMPORARY))
|
||||
atomic64_sub(num_pages, &mgr->used);
|
||||
|
||||
return r;
|
||||
}
|
||||
@@ -198,7 +196,9 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
|
||||
if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
|
||||
drm_mm_remove_node(&node->base.mm_nodes[0]);
|
||||
spin_unlock(&mgr->lock);
|
||||
atomic64_add(res->num_pages, &mgr->available);
|
||||
|
||||
if (!(res->placement & TTM_PL_FLAG_TEMPORARY))
|
||||
atomic64_sub(res->num_pages, &mgr->used);
|
||||
|
||||
kfree(node);
|
||||
}
|
||||
@@ -213,9 +213,8 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
|
||||
uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
|
||||
{
|
||||
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
|
||||
s64 result = man->size - atomic64_read(&mgr->available);
|
||||
|
||||
return (result > 0 ? result : 0) * PAGE_SIZE;
|
||||
return atomic64_read(&mgr->used) * PAGE_SIZE;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -265,9 +264,8 @@ static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man,
|
||||
drm_mm_print(&mgr->mm, printer);
|
||||
spin_unlock(&mgr->lock);
|
||||
|
||||
drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n",
|
||||
man->size, (u64)atomic64_read(&mgr->available),
|
||||
amdgpu_gtt_mgr_usage(man) >> 20);
|
||||
drm_printf(printer, "man size:%llu pages, gtt used:%llu pages\n",
|
||||
man->size, atomic64_read(&mgr->used));
|
||||
}
|
||||
|
||||
static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = {
|
||||
@@ -299,7 +297,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
|
||||
size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
|
||||
drm_mm_init(&mgr->mm, start, size);
|
||||
spin_lock_init(&mgr->lock);
|
||||
atomic64_set(&mgr->available, gtt_size >> PAGE_SHIFT);
|
||||
atomic64_set(&mgr->used, 0);
|
||||
|
||||
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager);
|
||||
ttm_resource_manager_set_used(man, true);
|
||||
|
||||
@@ -41,7 +41,6 @@ int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev)
|
||||
adev->hdp.ras_if->block = AMDGPU_RAS_BLOCK__HDP;
|
||||
adev->hdp.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
|
||||
adev->hdp.ras_if->sub_block_index = 0;
|
||||
strcpy(adev->hdp.ras_if->name, "hdp");
|
||||
}
|
||||
ih_info.head = fs_info.head = *adev->hdp.ras_if;
|
||||
r = amdgpu_ras_late_init(adev, adev->hdp.ras_if,
|
||||
|
||||
@@ -339,7 +339,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
|
||||
void
|
||||
amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
|
||||
{
|
||||
u8 val;
|
||||
u8 val = 0;
|
||||
|
||||
if (!amdgpu_connector->router.ddc_valid)
|
||||
return;
|
||||
|
||||
@@ -262,7 +262,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
|
||||
fence_flags | AMDGPU_FENCE_FLAG_64BIT);
|
||||
}
|
||||
|
||||
r = amdgpu_fence_emit(ring, f, fence_flags);
|
||||
r = amdgpu_fence_emit(ring, f, job, fence_flags);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
|
||||
if (job && job->vmid)
|
||||
|
||||
@@ -46,7 +46,6 @@
|
||||
#include <linux/pci.h>
|
||||
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/drm_irq.h>
|
||||
#include <drm/drm_vblank.h>
|
||||
#include <drm/amdgpu_drm.h>
|
||||
#include <drm/drm_drv.h>
|
||||
@@ -184,7 +183,7 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
|
||||
* Returns:
|
||||
* result of handling the IRQ, as defined by &irqreturn_t
|
||||
*/
|
||||
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
|
||||
static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
|
||||
{
|
||||
struct drm_device *dev = (struct drm_device *) arg;
|
||||
struct amdgpu_device *adev = drm_to_adev(dev);
|
||||
@@ -307,6 +306,7 @@ static void amdgpu_restore_msix(struct amdgpu_device *adev)
|
||||
int amdgpu_irq_init(struct amdgpu_device *adev)
|
||||
{
|
||||
int r = 0;
|
||||
unsigned int irq;
|
||||
|
||||
spin_lock_init(&adev->irq.lock);
|
||||
|
||||
@@ -349,15 +349,22 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
|
||||
INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
|
||||
INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);
|
||||
|
||||
adev->irq.installed = true;
|
||||
/* Use vector 0 for MSI-X */
|
||||
r = drm_irq_install(adev_to_drm(adev), pci_irq_vector(adev->pdev, 0));
|
||||
/* Use vector 0 for MSI-X. */
|
||||
r = pci_irq_vector(adev->pdev, 0);
|
||||
if (r < 0)
|
||||
return r;
|
||||
irq = r;
|
||||
|
||||
/* PCI devices require shared interrupts. */
|
||||
r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
|
||||
adev_to_drm(adev));
|
||||
if (r) {
|
||||
adev->irq.installed = false;
|
||||
if (!amdgpu_device_has_dc_support(adev))
|
||||
flush_work(&adev->hotplug_work);
|
||||
return r;
|
||||
}
|
||||
adev->irq.installed = true;
|
||||
adev->irq.irq = irq;
|
||||
adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
|
||||
|
||||
DRM_DEBUG("amdgpu: irq initialized.\n");
|
||||
@@ -368,7 +375,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
|
||||
void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->irq.installed) {
|
||||
drm_irq_uninstall(&adev->ddev);
|
||||
free_irq(adev->irq.irq, adev_to_drm(adev));
|
||||
adev->irq.installed = false;
|
||||
if (adev->irq.msi_enabled)
|
||||
pci_free_irq_vectors(adev->pdev);
|
||||
@@ -584,7 +591,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, j, k;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
|
||||
amdgpu_restore_msix(adev);
|
||||
|
||||
for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
|
||||
@@ -617,7 +624,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
|
||||
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
|
||||
unsigned type)
|
||||
{
|
||||
if (!adev_to_drm(adev)->irq_enabled)
|
||||
if (!adev->irq.installed)
|
||||
return -ENOENT;
|
||||
|
||||
if (type >= src->num_types)
|
||||
@@ -647,7 +654,7 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
|
||||
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
|
||||
unsigned type)
|
||||
{
|
||||
if (!adev_to_drm(adev)->irq_enabled)
|
||||
if (!adev->irq.installed)
|
||||
return -ENOENT;
|
||||
|
||||
if (type >= src->num_types)
|
||||
@@ -678,7 +685,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
|
||||
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
|
||||
unsigned type)
|
||||
{
|
||||
if (!adev_to_drm(adev)->irq_enabled)
|
||||
if (!adev->irq.installed)
|
||||
return false;
|
||||
|
||||
if (type >= src->num_types)
|
||||
|
||||
@@ -80,6 +80,7 @@ struct amdgpu_irq_src_funcs {
|
||||
|
||||
struct amdgpu_irq {
|
||||
bool installed;
|
||||
unsigned int irq;
|
||||
spinlock_t lock;
|
||||
/* interrupt sources */
|
||||
struct amdgpu_irq_client client[AMDGPU_IRQ_CLIENTID_MAX];
|
||||
@@ -100,7 +101,6 @@ struct amdgpu_irq {
|
||||
};
|
||||
|
||||
void amdgpu_irq_disable_all(struct amdgpu_device *adev);
|
||||
irqreturn_t amdgpu_irq_handler(int irq, void *arg);
|
||||
|
||||
int amdgpu_irq_init(struct amdgpu_device *adev);
|
||||
void amdgpu_irq_fini_sw(struct amdgpu_device *adev);
|
||||
|
||||
@@ -127,11 +127,16 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
|
||||
{
|
||||
struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
|
||||
struct dma_fence *f;
|
||||
struct dma_fence *hw_fence;
|
||||
unsigned i;
|
||||
|
||||
/* use sched fence if available */
|
||||
f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
|
||||
if (job->hw_fence.ops == NULL)
|
||||
hw_fence = job->external_hw_fence;
|
||||
else
|
||||
hw_fence = &job->hw_fence;
|
||||
|
||||
/* use sched fence if available */
|
||||
f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
|
||||
for (i = 0; i < job->num_ibs; ++i)
|
||||
amdgpu_ib_free(ring->adev, &job->ibs[i], f);
|
||||
}
|
||||
@@ -142,20 +147,27 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
|
||||
|
||||
drm_sched_job_cleanup(s_job);
|
||||
|
||||
dma_fence_put(job->fence);
|
||||
amdgpu_sync_free(&job->sync);
|
||||
amdgpu_sync_free(&job->sched_sync);
|
||||
kfree(job);
|
||||
|
||||
/* only put the hw fence if has embedded fence */
|
||||
if (job->hw_fence.ops != NULL)
|
||||
dma_fence_put(&job->hw_fence);
|
||||
else
|
||||
kfree(job);
|
||||
}
|
||||
|
||||
void amdgpu_job_free(struct amdgpu_job *job)
|
||||
{
|
||||
amdgpu_job_free_resources(job);
|
||||
|
||||
dma_fence_put(job->fence);
|
||||
amdgpu_sync_free(&job->sync);
|
||||
amdgpu_sync_free(&job->sched_sync);
|
||||
kfree(job);
|
||||
|
||||
/* only put the hw fence if has embedded fence */
|
||||
if (job->hw_fence.ops != NULL)
|
||||
dma_fence_put(&job->hw_fence);
|
||||
else
|
||||
kfree(job);
|
||||
}
|
||||
|
||||
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
|
||||
@@ -184,11 +196,14 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
|
||||
|
||||
job->base.sched = &ring->sched;
|
||||
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
|
||||
job->fence = dma_fence_get(*fence);
|
||||
/* record external_hw_fence for direct submit */
|
||||
job->external_hw_fence = dma_fence_get(*fence);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
amdgpu_job_free(job);
|
||||
dma_fence_put(*fence);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -246,10 +261,12 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
|
||||
if (r)
|
||||
DRM_ERROR("Error scheduling IBs (%d)\n", r);
|
||||
}
|
||||
/* if gpu reset, hw fence will be replaced here */
|
||||
dma_fence_put(job->fence);
|
||||
job->fence = dma_fence_get(fence);
|
||||
|
||||
if (!job->job_run_counter)
|
||||
dma_fence_get(fence);
|
||||
else if (finished->error < 0)
|
||||
dma_fence_put(&job->hw_fence);
|
||||
job->job_run_counter++;
|
||||
amdgpu_job_free_resources(job);
|
||||
|
||||
fence = r ? ERR_PTR(r) : fence;
|
||||
|
||||
@@ -46,7 +46,8 @@ struct amdgpu_job {
|
||||
struct amdgpu_sync sync;
|
||||
struct amdgpu_sync sched_sync;
|
||||
struct amdgpu_ib *ibs;
|
||||
struct dma_fence *fence; /* the hw fence */
|
||||
struct dma_fence hw_fence;
|
||||
struct dma_fence *external_hw_fence;
|
||||
uint32_t preamble_status;
|
||||
uint32_t preemption_status;
|
||||
uint32_t num_ibs;
|
||||
@@ -62,6 +63,9 @@ struct amdgpu_job {
|
||||
/* user fence handling */
|
||||
uint64_t uf_addr;
|
||||
uint64_t uf_sequence;
|
||||
|
||||
/* job_run_counter >= 1 means a resubmit job */
|
||||
uint32_t job_run_counter;
|
||||
};
|
||||
|
||||
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
|
||||
|
||||
@@ -47,8 +47,6 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
int i;
|
||||
|
||||
cancel_delayed_work_sync(&adev->jpeg.idle_work);
|
||||
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
|
||||
if (adev->jpeg.harvest_config & (1 << i))
|
||||
continue;
|
||||
|
||||
@@ -341,27 +341,27 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
|
||||
switch (query_fw->index) {
|
||||
case TA_FW_TYPE_PSP_XGMI:
|
||||
fw_info->ver = adev->psp.ta_fw_version;
|
||||
fw_info->feature = adev->psp.ta_xgmi_ucode_version;
|
||||
fw_info->feature = adev->psp.xgmi.feature_version;
|
||||
break;
|
||||
case TA_FW_TYPE_PSP_RAS:
|
||||
fw_info->ver = adev->psp.ta_fw_version;
|
||||
fw_info->feature = adev->psp.ta_ras_ucode_version;
|
||||
fw_info->feature = adev->psp.ras.feature_version;
|
||||
break;
|
||||
case TA_FW_TYPE_PSP_HDCP:
|
||||
fw_info->ver = adev->psp.ta_fw_version;
|
||||
fw_info->feature = adev->psp.ta_hdcp_ucode_version;
|
||||
fw_info->feature = adev->psp.hdcp.feature_version;
|
||||
break;
|
||||
case TA_FW_TYPE_PSP_DTM:
|
||||
fw_info->ver = adev->psp.ta_fw_version;
|
||||
fw_info->feature = adev->psp.ta_dtm_ucode_version;
|
||||
fw_info->feature = adev->psp.dtm.feature_version;
|
||||
break;
|
||||
case TA_FW_TYPE_PSP_RAP:
|
||||
fw_info->ver = adev->psp.ta_fw_version;
|
||||
fw_info->feature = adev->psp.ta_rap_ucode_version;
|
||||
fw_info->feature = adev->psp.rap.feature_version;
|
||||
break;
|
||||
case TA_FW_TYPE_PSP_SECUREDISPLAY:
|
||||
fw_info->ver = adev->psp.ta_fw_version;
|
||||
fw_info->feature = adev->psp.ta_securedisplay_ucode_version;
|
||||
fw_info->feature = adev->psp.securedisplay.feature_version;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
@@ -374,12 +374,12 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
|
||||
fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
|
||||
break;
|
||||
case AMDGPU_INFO_FW_SOS:
|
||||
fw_info->ver = adev->psp.sos_fw_version;
|
||||
fw_info->feature = adev->psp.sos_feature_version;
|
||||
fw_info->ver = adev->psp.sos.fw_version;
|
||||
fw_info->feature = adev->psp.sos.feature_version;
|
||||
break;
|
||||
case AMDGPU_INFO_FW_ASD:
|
||||
fw_info->ver = adev->psp.asd_fw_version;
|
||||
fw_info->feature = adev->psp.asd_feature_version;
|
||||
fw_info->ver = adev->psp.asd.fw_version;
|
||||
fw_info->feature = adev->psp.asd.feature_version;
|
||||
break;
|
||||
case AMDGPU_INFO_FW_DMCU:
|
||||
fw_info->ver = adev->dm.dmcu_fw_version;
|
||||
@@ -390,8 +390,8 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
|
||||
fw_info->feature = 0;
|
||||
break;
|
||||
case AMDGPU_INFO_FW_TOC:
|
||||
fw_info->ver = adev->psp.toc_fw_version;
|
||||
fw_info->feature = adev->psp.toc_feature_version;
|
||||
fw_info->ver = adev->psp.toc.fw_version;
|
||||
fw_info->feature = adev->psp.toc.feature_version;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
@@ -1179,10 +1179,14 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
|
||||
pasid = 0;
|
||||
}
|
||||
|
||||
r = amdgpu_vm_init(adev, &fpriv->vm, pasid);
|
||||
r = amdgpu_vm_init(adev, &fpriv->vm);
|
||||
if (r)
|
||||
goto error_pasid;
|
||||
|
||||
r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
|
||||
if (r)
|
||||
goto error_vm;
|
||||
|
||||
fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
|
||||
if (!fpriv->prt_va) {
|
||||
r = -ENOMEM;
|
||||
@@ -1210,8 +1214,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
|
||||
amdgpu_vm_fini(adev, &fpriv->vm);
|
||||
|
||||
error_pasid:
|
||||
if (pasid)
|
||||
if (pasid) {
|
||||
amdgpu_pasid_free(pasid);
|
||||
amdgpu_vm_set_pasid(adev, &fpriv->vm, 0);
|
||||
}
|
||||
|
||||
kfree(fpriv);
|
||||
|
||||
|
||||
117
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
Normal file
117
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
Normal file
@@ -0,0 +1,117 @@
|
||||
/*
|
||||
* Copyright 2021 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
#include "amdgpu_ras.h"
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_mca.h"
|
||||
|
||||
#include "umc/umc_6_7_0_offset.h"
|
||||
#include "umc/umc_6_7_0_sh_mask.h"
|
||||
|
||||
void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
|
||||
uint64_t mc_status_addr,
|
||||
unsigned long *error_count)
|
||||
{
|
||||
uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4);
|
||||
|
||||
if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
|
||||
REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
|
||||
*error_count += 1;
|
||||
}
|
||||
|
||||
void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
|
||||
uint64_t mc_status_addr,
|
||||
unsigned long *error_count)
|
||||
{
|
||||
uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4);
|
||||
|
||||
if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
|
||||
(REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
|
||||
REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
|
||||
REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
|
||||
REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
|
||||
REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
|
||||
*error_count += 1;
|
||||
}
|
||||
|
||||
void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
|
||||
uint64_t mc_status_addr)
|
||||
{
|
||||
WREG64_PCIE(mc_status_addr * 4, 0x0ULL);
|
||||
}
|
||||
|
||||
void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
|
||||
uint64_t mc_status_addr,
|
||||
void *ras_error_status)
|
||||
{
|
||||
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
|
||||
|
||||
amdgpu_mca_query_correctable_error_count(adev, mc_status_addr, &(err_data->ce_count));
|
||||
amdgpu_mca_query_uncorrectable_error_count(adev, mc_status_addr, &(err_data->ue_count));
|
||||
|
||||
amdgpu_mca_reset_error_count(adev, mc_status_addr);
|
||||
}
|
||||
|
||||
int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
|
||||
struct amdgpu_mca_ras *mca_dev)
|
||||
{
|
||||
int r;
|
||||
struct ras_ih_if ih_info = {
|
||||
.cb = NULL,
|
||||
};
|
||||
struct ras_fs_if fs_info = {
|
||||
.sysfs_name = mca_dev->ras_funcs->sysfs_name,
|
||||
};
|
||||
|
||||
if (!mca_dev->ras_if) {
|
||||
mca_dev->ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
|
||||
if (!mca_dev->ras_if)
|
||||
return -ENOMEM;
|
||||
mca_dev->ras_if->block = mca_dev->ras_funcs->ras_block;
|
||||
mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
|
||||
mca_dev->ras_if->sub_block_index = 0;
|
||||
}
|
||||
ih_info.head = fs_info.head = *mca_dev->ras_if;
|
||||
r = amdgpu_ras_late_init(adev, mca_dev->ras_if,
|
||||
&fs_info, &ih_info);
|
||||
if (r || !amdgpu_ras_is_supported(adev, mca_dev->ras_if->block)) {
|
||||
kfree(mca_dev->ras_if);
|
||||
mca_dev->ras_if = NULL;
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
void amdgpu_mca_ras_fini(struct amdgpu_device *adev,
|
||||
struct amdgpu_mca_ras *mca_dev)
|
||||
{
|
||||
struct ras_ih_if ih_info = {
|
||||
.cb = NULL,
|
||||
};
|
||||
|
||||
if (!mca_dev->ras_if)
|
||||
return;
|
||||
|
||||
amdgpu_ras_late_fini(adev, mca_dev->ras_if, &ih_info);
|
||||
kfree(mca_dev->ras_if);
|
||||
mca_dev->ras_if = NULL;
|
||||
}
|
||||
72
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
Normal file
72
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
Normal file
@@ -0,0 +1,72 @@
|
||||
/*
|
||||
* Copyright (C) 2021 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
|
||||
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#ifndef __AMDGPU_MCA_H__
|
||||
#define __AMDGPU_MCA_H__
|
||||
|
||||
struct amdgpu_mca_ras_funcs {
|
||||
int (*ras_late_init)(struct amdgpu_device *adev);
|
||||
void (*ras_fini)(struct amdgpu_device *adev);
|
||||
void (*query_ras_error_count)(struct amdgpu_device *adev,
|
||||
void *ras_error_status);
|
||||
void (*query_ras_error_address)(struct amdgpu_device *adev,
|
||||
void *ras_error_status);
|
||||
uint32_t ras_block;
|
||||
const char* sysfs_name;
|
||||
};
|
||||
|
||||
struct amdgpu_mca_ras {
|
||||
struct ras_common_if *ras_if;
|
||||
const struct amdgpu_mca_ras_funcs *ras_funcs;
|
||||
};
|
||||
|
||||
struct amdgpu_mca_funcs {
|
||||
void (*init)(struct amdgpu_device *adev);
|
||||
};
|
||||
|
||||
struct amdgpu_mca {
|
||||
const struct amdgpu_mca_funcs *funcs;
|
||||
struct amdgpu_mca_ras mp0;
|
||||
struct amdgpu_mca_ras mp1;
|
||||
struct amdgpu_mca_ras mpio;
|
||||
};
|
||||
|
||||
void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
|
||||
uint64_t mc_status_addr,
|
||||
unsigned long *error_count);
|
||||
|
||||
void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
|
||||
uint64_t mc_status_addr,
|
||||
unsigned long *error_count);
|
||||
|
||||
void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
|
||||
uint64_t mc_status_addr);
|
||||
|
||||
void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
|
||||
uint64_t mc_status_addr,
|
||||
void *ras_error_status);
|
||||
|
||||
int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
|
||||
struct amdgpu_mca_ras *mca_dev);
|
||||
|
||||
void amdgpu_mca_ras_fini(struct amdgpu_device *adev,
|
||||
struct amdgpu_mca_ras *mca_dev);
|
||||
|
||||
#endif
|
||||
@@ -41,7 +41,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
|
||||
adev->mmhub.ras_if->block = AMDGPU_RAS_BLOCK__MMHUB;
|
||||
adev->mmhub.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
|
||||
adev->mmhub.ras_if->sub_block_index = 0;
|
||||
strcpy(adev->mmhub.ras_if->name, "mmhub");
|
||||
}
|
||||
ih_info.head = fs_info.head = *adev->mmhub.ras_if;
|
||||
r = amdgpu_ras_late_init(adev, adev->mmhub.ras_if,
|
||||
|
||||
@@ -39,7 +39,6 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
|
||||
adev->nbio.ras_if->block = AMDGPU_RAS_BLOCK__PCIE_BIF;
|
||||
adev->nbio.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
|
||||
adev->nbio.ras_if->sub_block_index = 0;
|
||||
strcpy(adev->nbio.ras_if->name, "pcie_bif");
|
||||
}
|
||||
ih_info.head = fs_info.head = *adev->nbio.ras_if;
|
||||
r = amdgpu_ras_late_init(adev, adev->nbio.ras_if,
|
||||
|
||||
@@ -95,6 +95,7 @@ struct amdgpu_nbio_funcs {
|
||||
void (*program_aspm)(struct amdgpu_device *adev);
|
||||
void (*apply_lc_spc_mode_wa)(struct amdgpu_device *adev);
|
||||
void (*apply_l1_link_width_reconfig_wa)(struct amdgpu_device *adev);
|
||||
void (*clear_doorbell_interrupt)(struct amdgpu_device *adev);
|
||||
};
|
||||
|
||||
struct amdgpu_nbio {
|
||||
|
||||
@@ -196,7 +196,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
|
||||
c++;
|
||||
}
|
||||
|
||||
BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
|
||||
BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);
|
||||
|
||||
placement->num_placement = c;
|
||||
placement->placement = places;
|
||||
@@ -731,7 +731,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
|
||||
/**
|
||||
* amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
|
||||
*
|
||||
* @bo: BO that will be inserted into the shadow list
|
||||
* @vmbo: BO that will be inserted into the shadow list
|
||||
*
|
||||
* Insert a BO to the shadow list.
|
||||
*/
|
||||
@@ -913,7 +913,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
|
||||
return -EINVAL;
|
||||
|
||||
/* A shared bo cannot be migrated to VRAM */
|
||||
if (bo->prime_shared_count || bo->tbo.base.import_attach) {
|
||||
if (bo->tbo.base.import_attach) {
|
||||
if (domain & AMDGPU_GEM_DOMAIN_GTT)
|
||||
domain = AMDGPU_GEM_DOMAIN_GTT;
|
||||
else
|
||||
@@ -947,7 +947,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
|
||||
/* This assumes only APU display buffers are pinned with (VRAM|GTT).
|
||||
* See function amdgpu_display_supported_domains()
|
||||
*/
|
||||
domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
|
||||
domain = amdgpu_bo_get_preferred_domain(adev, domain);
|
||||
|
||||
if (bo->tbo.base.import_attach)
|
||||
dma_buf_pin(bo->tbo.base.import_attach);
|
||||
@@ -1518,14 +1518,14 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
|
||||
* amdgpu_bo_get_preferred_domain - get preferred domain
|
||||
* @adev: amdgpu device object
|
||||
* @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
|
||||
*
|
||||
* Returns:
|
||||
* Which of the allowed domains is preferred for pinning the BO for scanout.
|
||||
* Which of the allowed domains is preferred for allocating the BO.
|
||||
*/
|
||||
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
|
||||
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
|
||||
uint32_t domain)
|
||||
{
|
||||
if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
|
||||
|
||||
@@ -100,7 +100,6 @@ struct amdgpu_bo {
|
||||
struct ttm_buffer_object tbo;
|
||||
struct ttm_bo_kmap_obj kmap;
|
||||
u64 flags;
|
||||
unsigned prime_shared_count;
|
||||
/* per VM structure for page tables and with virtual addresses */
|
||||
struct amdgpu_vm_bo_base *vm_bo;
|
||||
/* Constant after initialization */
|
||||
@@ -334,7 +333,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
|
||||
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
|
||||
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
|
||||
struct dma_fence **fence);
|
||||
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
|
||||
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
|
||||
uint32_t domain);
|
||||
|
||||
/*
|
||||
|
||||
@@ -80,12 +80,17 @@ static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
|
||||
* Calculate feedback and reference divider for a given post divider. Makes
|
||||
* sure we stay within the limits.
|
||||
*/
|
||||
static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
|
||||
unsigned fb_div_max, unsigned ref_div_max,
|
||||
unsigned *fb_div, unsigned *ref_div)
|
||||
static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int nom,
|
||||
unsigned int den, unsigned int post_div,
|
||||
unsigned int fb_div_max, unsigned int ref_div_max,
|
||||
unsigned int *fb_div, unsigned int *ref_div)
|
||||
{
|
||||
|
||||
/* limit reference * post divider to a maximum */
|
||||
ref_div_max = min(128 / post_div, ref_div_max);
|
||||
if (adev->family == AMDGPU_FAMILY_SI)
|
||||
ref_div_max = min(100 / post_div, ref_div_max);
|
||||
else
|
||||
ref_div_max = min(128 / post_div, ref_div_max);
|
||||
|
||||
/* get matching reference and feedback divider */
|
||||
*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
|
||||
@@ -112,7 +117,8 @@ static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_
|
||||
* Try to calculate the PLL parameters to generate the given frequency:
|
||||
* dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
|
||||
*/
|
||||
void amdgpu_pll_compute(struct amdgpu_pll *pll,
|
||||
void amdgpu_pll_compute(struct amdgpu_device *adev,
|
||||
struct amdgpu_pll *pll,
|
||||
u32 freq,
|
||||
u32 *dot_clock_p,
|
||||
u32 *fb_div_p,
|
||||
@@ -199,7 +205,7 @@ void amdgpu_pll_compute(struct amdgpu_pll *pll,
|
||||
|
||||
for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
|
||||
unsigned diff;
|
||||
amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max,
|
||||
amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max,
|
||||
ref_div_max, &fb_div, &ref_div);
|
||||
diff = abs(target_clock - (pll->reference_freq * fb_div) /
|
||||
(ref_div * post_div));
|
||||
@@ -214,7 +220,7 @@ void amdgpu_pll_compute(struct amdgpu_pll *pll,
|
||||
post_div = post_div_best;
|
||||
|
||||
/* get the feedback and reference divider for the optimal value */
|
||||
amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
|
||||
amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max, ref_div_max,
|
||||
&fb_div, &ref_div);
|
||||
|
||||
/* reduce the numbers to a simpler ratio once more */
|
||||
|
||||
@@ -24,7 +24,8 @@
|
||||
#ifndef __AMDGPU_PLL_H__
|
||||
#define __AMDGPU_PLL_H__
|
||||
|
||||
void amdgpu_pll_compute(struct amdgpu_pll *pll,
|
||||
void amdgpu_pll_compute(struct amdgpu_device *adev,
|
||||
struct amdgpu_pll *pll,
|
||||
u32 freq,
|
||||
u32 *dot_clock_p,
|
||||
u32 *fb_div_p,
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -48,11 +48,15 @@
|
||||
struct psp_context;
|
||||
struct psp_xgmi_node_info;
|
||||
struct psp_xgmi_topology_info;
|
||||
struct psp_bin_desc;
|
||||
|
||||
enum psp_bootloader_cmd {
|
||||
PSP_BL__LOAD_SYSDRV = 0x10000,
|
||||
PSP_BL__LOAD_SOSDRV = 0x20000,
|
||||
PSP_BL__LOAD_KEY_DATABASE = 0x80000,
|
||||
PSP_BL__LOAD_SOCDRV = 0xB0000,
|
||||
PSP_BL__LOAD_INTFDRV = 0xC0000,
|
||||
PSP_BL__LOAD_DBGDRV = 0xD0000,
|
||||
PSP_BL__DRAM_LONG_TRAIN = 0x100000,
|
||||
PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
|
||||
PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000,
|
||||
@@ -93,6 +97,9 @@ struct psp_funcs
|
||||
int (*bootloader_load_kdb)(struct psp_context *psp);
|
||||
int (*bootloader_load_spl)(struct psp_context *psp);
|
||||
int (*bootloader_load_sysdrv)(struct psp_context *psp);
|
||||
int (*bootloader_load_soc_drv)(struct psp_context *psp);
|
||||
int (*bootloader_load_intf_drv)(struct psp_context *psp);
|
||||
int (*bootloader_load_dbg_drv)(struct psp_context *psp);
|
||||
int (*bootloader_load_sos)(struct psp_context *psp);
|
||||
int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
|
||||
int (*ring_create)(struct psp_context *psp,
|
||||
@@ -106,7 +113,7 @@ struct psp_funcs
|
||||
int (*mem_training)(struct psp_context *psp, uint32_t ops);
|
||||
uint32_t (*ring_get_wptr)(struct psp_context *psp);
|
||||
void (*ring_set_wptr)(struct psp_context *psp, uint32_t value);
|
||||
int (*load_usbc_pd_fw)(struct psp_context *psp, dma_addr_t dma_addr);
|
||||
int (*load_usbc_pd_fw)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
|
||||
int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
|
||||
};
|
||||
|
||||
@@ -116,6 +123,7 @@ struct psp_xgmi_node_info {
|
||||
uint8_t num_hops;
|
||||
uint8_t is_sharing_enabled;
|
||||
enum ta_xgmi_assigned_sdma_engine sdma_engine;
|
||||
uint8_t num_links;
|
||||
};
|
||||
|
||||
struct psp_xgmi_topology_info {
|
||||
@@ -128,59 +136,32 @@ struct psp_asd_context {
|
||||
uint32_t session_id;
|
||||
};
|
||||
|
||||
struct ta_mem_context {
|
||||
struct amdgpu_bo *shared_bo;
|
||||
uint64_t shared_mc_addr;
|
||||
void *shared_buf;
|
||||
};
|
||||
|
||||
struct ta_context {
|
||||
bool initialized;
|
||||
uint32_t session_id;
|
||||
struct ta_mem_context mem_context;
|
||||
};
|
||||
|
||||
struct ta_cp_context {
|
||||
struct ta_context context;
|
||||
struct mutex mutex;
|
||||
};
|
||||
|
||||
struct psp_xgmi_context {
|
||||
uint8_t initialized;
|
||||
uint32_t session_id;
|
||||
struct amdgpu_bo *xgmi_shared_bo;
|
||||
uint64_t xgmi_shared_mc_addr;
|
||||
void *xgmi_shared_buf;
|
||||
struct ta_context context;
|
||||
struct psp_xgmi_topology_info top_info;
|
||||
bool supports_extended_data;
|
||||
};
|
||||
|
||||
struct psp_ras_context {
|
||||
/*ras fw*/
|
||||
bool ras_initialized;
|
||||
uint32_t session_id;
|
||||
struct amdgpu_bo *ras_shared_bo;
|
||||
uint64_t ras_shared_mc_addr;
|
||||
void *ras_shared_buf;
|
||||
struct amdgpu_ras *ras;
|
||||
};
|
||||
|
||||
struct psp_hdcp_context {
|
||||
bool hdcp_initialized;
|
||||
uint32_t session_id;
|
||||
struct amdgpu_bo *hdcp_shared_bo;
|
||||
uint64_t hdcp_shared_mc_addr;
|
||||
void *hdcp_shared_buf;
|
||||
struct mutex mutex;
|
||||
};
|
||||
|
||||
struct psp_dtm_context {
|
||||
bool dtm_initialized;
|
||||
uint32_t session_id;
|
||||
struct amdgpu_bo *dtm_shared_bo;
|
||||
uint64_t dtm_shared_mc_addr;
|
||||
void *dtm_shared_buf;
|
||||
struct mutex mutex;
|
||||
};
|
||||
|
||||
struct psp_rap_context {
|
||||
bool rap_initialized;
|
||||
uint32_t session_id;
|
||||
struct amdgpu_bo *rap_shared_bo;
|
||||
uint64_t rap_shared_mc_addr;
|
||||
void *rap_shared_buf;
|
||||
struct mutex mutex;
|
||||
};
|
||||
|
||||
struct psp_securedisplay_context {
|
||||
bool securedisplay_initialized;
|
||||
uint32_t session_id;
|
||||
struct amdgpu_bo *securedisplay_shared_bo;
|
||||
uint64_t securedisplay_shared_mc_addr;
|
||||
void *securedisplay_shared_buf;
|
||||
struct mutex mutex;
|
||||
struct ta_context context;
|
||||
struct amdgpu_ras *ras;
|
||||
};
|
||||
|
||||
#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
|
||||
@@ -282,6 +263,13 @@ struct psp_runtime_boot_cfg_entry {
|
||||
uint32_t reserved;
|
||||
};
|
||||
|
||||
struct psp_bin_desc {
|
||||
uint32_t fw_version;
|
||||
uint32_t feature_version;
|
||||
uint32_t size_bytes;
|
||||
uint8_t *start_addr;
|
||||
};
|
||||
|
||||
struct psp_context
|
||||
{
|
||||
struct amdgpu_device *adev;
|
||||
@@ -297,36 +285,26 @@ struct psp_context
|
||||
|
||||
/* sos firmware */
|
||||
const struct firmware *sos_fw;
|
||||
uint32_t sos_fw_version;
|
||||
uint32_t sos_feature_version;
|
||||
uint32_t sys_bin_size;
|
||||
uint32_t sos_bin_size;
|
||||
uint32_t toc_bin_size;
|
||||
uint32_t kdb_bin_size;
|
||||
uint32_t spl_bin_size;
|
||||
uint32_t rl_bin_size;
|
||||
uint8_t *sys_start_addr;
|
||||
uint8_t *sos_start_addr;
|
||||
uint8_t *toc_start_addr;
|
||||
uint8_t *kdb_start_addr;
|
||||
uint8_t *spl_start_addr;
|
||||
uint8_t *rl_start_addr;
|
||||
struct psp_bin_desc sys;
|
||||
struct psp_bin_desc sos;
|
||||
struct psp_bin_desc toc;
|
||||
struct psp_bin_desc kdb;
|
||||
struct psp_bin_desc spl;
|
||||
struct psp_bin_desc rl;
|
||||
struct psp_bin_desc soc_drv;
|
||||
struct psp_bin_desc intf_drv;
|
||||
struct psp_bin_desc dbg_drv;
|
||||
|
||||
/* tmr buffer */
|
||||
struct amdgpu_bo *tmr_bo;
|
||||
uint64_t tmr_mc_addr;
|
||||
|
||||
/* asd firmware */
|
||||
const struct firmware *asd_fw;
|
||||
uint32_t asd_fw_version;
|
||||
uint32_t asd_feature_version;
|
||||
uint32_t asd_ucode_size;
|
||||
uint8_t *asd_start_addr;
|
||||
const struct firmware *asd_fw;
|
||||
struct psp_bin_desc asd;
|
||||
|
||||
/* toc firmware */
|
||||
const struct firmware *toc_fw;
|
||||
uint32_t toc_fw_version;
|
||||
uint32_t toc_feature_version;
|
||||
|
||||
/* fence buffer */
|
||||
struct amdgpu_bo *fence_buf_bo;
|
||||
@@ -348,36 +326,20 @@ struct psp_context
|
||||
/* xgmi ta firmware and buffer */
|
||||
const struct firmware *ta_fw;
|
||||
uint32_t ta_fw_version;
|
||||
uint32_t ta_xgmi_ucode_version;
|
||||
uint32_t ta_xgmi_ucode_size;
|
||||
uint8_t *ta_xgmi_start_addr;
|
||||
uint32_t ta_ras_ucode_version;
|
||||
uint32_t ta_ras_ucode_size;
|
||||
uint8_t *ta_ras_start_addr;
|
||||
|
||||
uint32_t ta_hdcp_ucode_version;
|
||||
uint32_t ta_hdcp_ucode_size;
|
||||
uint8_t *ta_hdcp_start_addr;
|
||||
|
||||
uint32_t ta_dtm_ucode_version;
|
||||
uint32_t ta_dtm_ucode_size;
|
||||
uint8_t *ta_dtm_start_addr;
|
||||
|
||||
uint32_t ta_rap_ucode_version;
|
||||
uint32_t ta_rap_ucode_size;
|
||||
uint8_t *ta_rap_start_addr;
|
||||
|
||||
uint32_t ta_securedisplay_ucode_version;
|
||||
uint32_t ta_securedisplay_ucode_size;
|
||||
uint8_t *ta_securedisplay_start_addr;
|
||||
struct psp_bin_desc xgmi;
|
||||
struct psp_bin_desc ras;
|
||||
struct psp_bin_desc hdcp;
|
||||
struct psp_bin_desc dtm;
|
||||
struct psp_bin_desc rap;
|
||||
struct psp_bin_desc securedisplay;
|
||||
|
||||
struct psp_asd_context asd_context;
|
||||
struct psp_xgmi_context xgmi_context;
|
||||
struct psp_ras_context ras;
|
||||
struct psp_hdcp_context hdcp_context;
|
||||
struct psp_dtm_context dtm_context;
|
||||
struct psp_rap_context rap_context;
|
||||
struct psp_securedisplay_context securedisplay_context;
|
||||
struct psp_ras_context ras_context;
|
||||
struct ta_cp_context hdcp_context;
|
||||
struct ta_cp_context dtm_context;
|
||||
struct ta_cp_context rap_context;
|
||||
struct ta_cp_context securedisplay_context;
|
||||
struct mutex mutex;
|
||||
struct psp_memory_training_context mem_train_ctx;
|
||||
|
||||
@@ -402,6 +364,12 @@ struct amdgpu_psp_funcs {
|
||||
((psp)->funcs->bootloader_load_spl ? (psp)->funcs->bootloader_load_spl((psp)) : 0)
|
||||
#define psp_bootloader_load_sysdrv(psp) \
|
||||
((psp)->funcs->bootloader_load_sysdrv ? (psp)->funcs->bootloader_load_sysdrv((psp)) : 0)
|
||||
#define psp_bootloader_load_soc_drv(psp) \
|
||||
((psp)->funcs->bootloader_load_soc_drv ? (psp)->funcs->bootloader_load_soc_drv((psp)) : 0)
|
||||
#define psp_bootloader_load_intf_drv(psp) \
|
||||
((psp)->funcs->bootloader_load_intf_drv ? (psp)->funcs->bootloader_load_intf_drv((psp)) : 0)
|
||||
#define psp_bootloader_load_dbg_drv(psp) \
|
||||
((psp)->funcs->bootloader_load_dbg_drv ? (psp)->funcs->bootloader_load_dbg_drv((psp)) : 0)
|
||||
#define psp_bootloader_load_sos(psp) \
|
||||
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
|
||||
#define psp_smu_reload_quirk(psp) \
|
||||
@@ -414,9 +382,9 @@ struct amdgpu_psp_funcs {
|
||||
#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
|
||||
#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
|
||||
|
||||
#define psp_load_usbc_pd_fw(psp, dma_addr) \
|
||||
#define psp_load_usbc_pd_fw(psp, fw_pri_mc_addr) \
|
||||
((psp)->funcs->load_usbc_pd_fw ? \
|
||||
(psp)->funcs->load_usbc_pd_fw((psp), (dma_addr)) : -EINVAL)
|
||||
(psp)->funcs->load_usbc_pd_fw((psp), (fw_pri_mc_addr)) : -EINVAL)
|
||||
|
||||
#define psp_read_usbc_pd_fw(psp, fw_ver) \
|
||||
((psp)->funcs->read_usbc_pd_fw ? \
|
||||
@@ -427,6 +395,7 @@ extern const struct amd_ip_funcs psp_ip_funcs;
|
||||
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
|
||||
extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
|
||||
extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
|
||||
extern const struct amdgpu_ip_block_version psp_v11_0_8_ip_block;
|
||||
extern const struct amdgpu_ip_block_version psp_v12_0_ip_block;
|
||||
extern const struct amdgpu_ip_block_version psp_v13_0_ip_block;
|
||||
|
||||
@@ -437,14 +406,15 @@ int psp_gpu_reset(struct amdgpu_device *adev);
|
||||
int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
|
||||
uint64_t cmd_gpu_addr, int cmd_size);
|
||||
|
||||
int psp_xgmi_initialize(struct psp_context *psp);
|
||||
int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta);
|
||||
int psp_xgmi_terminate(struct psp_context *psp);
|
||||
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
|
||||
int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id);
|
||||
int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id);
|
||||
int psp_xgmi_get_topology_info(struct psp_context *psp,
|
||||
int number_devices,
|
||||
struct psp_xgmi_topology_info *topology);
|
||||
struct psp_xgmi_topology_info *topology,
|
||||
bool get_extended_data);
|
||||
int psp_xgmi_set_topology_info(struct psp_context *psp,
|
||||
int number_devices,
|
||||
struct psp_xgmi_topology_info *topology);
|
||||
@@ -483,4 +453,5 @@ int psp_load_fw_list(struct psp_context *psp,
|
||||
struct amdgpu_firmware_info **ucode_list, int ucode_count);
|
||||
void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size);
|
||||
|
||||
int is_psp_fw_valid(struct psp_bin_desc bin);
|
||||
#endif
|
||||
|
||||
@@ -76,7 +76,7 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
|
||||
dev_info(adev->dev, "RAP L0 validate test success.\n");
|
||||
} else {
|
||||
rap_shared_mem = (struct ta_rap_shared_memory *)
|
||||
adev->psp.rap_context.rap_shared_buf;
|
||||
adev->psp.rap_context.context.mem_context.shared_buf;
|
||||
rap_cmd_output = &(rap_shared_mem->rap_out_message.output);
|
||||
|
||||
dev_info(adev->dev, "RAP test failed, the output is:\n");
|
||||
@@ -119,7 +119,7 @@ void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
struct drm_minor *minor = adev_to_drm(adev)->primary;
|
||||
|
||||
if (!adev->psp.rap_context.rap_initialized)
|
||||
if (!adev->psp.rap_context.context.initialized)
|
||||
return;
|
||||
|
||||
debugfs_create_file("rap_test", S_IWUSR, minor->debugfs_root,
|
||||
|
||||
@@ -64,15 +64,14 @@ const char *ras_block_string[] = {
|
||||
};
|
||||
|
||||
#define ras_err_str(i) (ras_error_string[ffs(i)])
|
||||
#define ras_block_str(i) (ras_block_string[i])
|
||||
|
||||
#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
|
||||
|
||||
/* inject address is 52 bits */
|
||||
#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
|
||||
|
||||
/* typical ECC bad page rate(1 bad page per 100MB VRAM) */
|
||||
#define RAS_BAD_PAGE_RATE (100 * 1024 * 1024ULL)
|
||||
/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
|
||||
#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
|
||||
|
||||
enum amdgpu_ras_retire_page_reservation {
|
||||
AMDGPU_RAS_RETIRE_PAGE_RESERVED,
|
||||
@@ -355,8 +354,9 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
|
||||
* to see which blocks support RAS on a particular asic.
|
||||
*
|
||||
*/
|
||||
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
|
||||
const char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
|
||||
struct ras_debug_if data;
|
||||
@@ -370,7 +370,7 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
|
||||
|
||||
ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
|
||||
if (ret)
|
||||
return -EINVAL;
|
||||
return ret;
|
||||
|
||||
if (data.op == 3) {
|
||||
ret = amdgpu_reserve_page_direct(adev, data.inject.address);
|
||||
@@ -403,9 +403,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
|
||||
/* umc ce/ue error injection for a bad page is not allowed */
|
||||
if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
|
||||
amdgpu_ras_check_bad_page(adev, data.inject.address)) {
|
||||
dev_warn(adev->dev, "RAS WARN: 0x%llx has been marked "
|
||||
"as bad before error injection!\n",
|
||||
data.inject.address);
|
||||
dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
|
||||
"already been marked as bad!\n",
|
||||
data.inject.address);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -439,21 +439,24 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
|
||||
* will reset EEPROM table to 0 entries.
|
||||
*
|
||||
*/
|
||||
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
|
||||
const char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
{
|
||||
struct amdgpu_device *adev =
|
||||
(struct amdgpu_device *)file_inode(f)->i_private;
|
||||
int ret;
|
||||
|
||||
ret = amdgpu_ras_eeprom_reset_table(
|
||||
&(amdgpu_ras_get_context(adev)->eeprom_control));
|
||||
&(amdgpu_ras_get_context(adev)->eeprom_control));
|
||||
|
||||
if (ret == 1) {
|
||||
if (!ret) {
|
||||
/* Something was written to EEPROM.
|
||||
*/
|
||||
amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
|
||||
return size;
|
||||
} else {
|
||||
return -EIO;
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -526,7 +529,7 @@ static inline void put_obj(struct ras_manager *obj)
|
||||
if (obj && (--obj->use == 0))
|
||||
list_del(&obj->node);
|
||||
if (obj && (obj->use < 0))
|
||||
DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
|
||||
DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", ras_block_str(obj->head.block));
|
||||
}
|
||||
|
||||
/* make one obj and return it. */
|
||||
@@ -789,7 +792,6 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
|
||||
.type = default_ras_type,
|
||||
.sub_block_index = 0,
|
||||
};
|
||||
strcpy(head.name, ras_block_str(i));
|
||||
if (bypass) {
|
||||
/*
|
||||
* bypass psp. vbios enable ras for us.
|
||||
@@ -1316,6 +1318,12 @@ static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *
|
||||
&con->bad_page_cnt_threshold);
|
||||
debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
|
||||
debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
|
||||
debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
|
||||
&amdgpu_ras_debugfs_eeprom_size_ops);
|
||||
con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
|
||||
S_IRUGO, dir, adev,
|
||||
&amdgpu_ras_debugfs_eeprom_table_ops);
|
||||
amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
|
||||
|
||||
/*
|
||||
* After one uncorrectable error happens, usually GPU recovery will
|
||||
@@ -1833,13 +1841,12 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
|
||||
|
||||
control = &con->eeprom_control;
|
||||
data = con->eh_data;
|
||||
save_count = data->count - control->num_recs;
|
||||
save_count = data->count - control->ras_num_recs;
|
||||
/* only new entries are saved */
|
||||
if (save_count > 0) {
|
||||
if (amdgpu_ras_eeprom_process_recods(control,
|
||||
&data->bps[control->num_recs],
|
||||
true,
|
||||
save_count)) {
|
||||
if (amdgpu_ras_eeprom_append(control,
|
||||
&data->bps[control->ras_num_recs],
|
||||
save_count)) {
|
||||
dev_err(adev->dev, "Failed to save EEPROM table data!");
|
||||
return -EIO;
|
||||
}
|
||||
@@ -1857,28 +1864,24 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
|
||||
static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_ras_eeprom_control *control =
|
||||
&adev->psp.ras.ras->eeprom_control;
|
||||
struct eeprom_table_record *bps = NULL;
|
||||
int ret = 0;
|
||||
&adev->psp.ras_context.ras->eeprom_control;
|
||||
struct eeprom_table_record *bps;
|
||||
int ret;
|
||||
|
||||
/* no bad page record, skip eeprom access */
|
||||
if (!control->num_recs || (amdgpu_bad_page_threshold == 0))
|
||||
return ret;
|
||||
if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
|
||||
return 0;
|
||||
|
||||
bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
|
||||
bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
|
||||
if (!bps)
|
||||
return -ENOMEM;
|
||||
|
||||
if (amdgpu_ras_eeprom_process_recods(control, bps, false,
|
||||
control->num_recs)) {
|
||||
ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
|
||||
if (ret)
|
||||
dev_err(adev->dev, "Failed to load EEPROM table records!");
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
else
|
||||
ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
|
||||
|
||||
ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs);
|
||||
|
||||
out:
|
||||
kfree(bps);
|
||||
return ret;
|
||||
}
|
||||
@@ -1918,11 +1921,9 @@ static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
|
||||
}
|
||||
|
||||
static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
|
||||
uint32_t max_length)
|
||||
uint32_t max_count)
|
||||
{
|
||||
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
|
||||
int tmp_threshold = amdgpu_bad_page_threshold;
|
||||
u64 val;
|
||||
|
||||
/*
|
||||
* Justification of value bad_page_cnt_threshold in ras structure
|
||||
@@ -1943,18 +1944,15 @@ static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
|
||||
* take no effect.
|
||||
*/
|
||||
|
||||
if (tmp_threshold < -1)
|
||||
tmp_threshold = -1;
|
||||
else if (tmp_threshold > max_length)
|
||||
tmp_threshold = max_length;
|
||||
if (amdgpu_bad_page_threshold < 0) {
|
||||
u64 val = adev->gmc.mc_vram_size;
|
||||
|
||||
if (tmp_threshold == -1) {
|
||||
val = adev->gmc.mc_vram_size;
|
||||
do_div(val, RAS_BAD_PAGE_RATE);
|
||||
do_div(val, RAS_BAD_PAGE_COVER);
|
||||
con->bad_page_cnt_threshold = min(lower_32_bits(val),
|
||||
max_length);
|
||||
max_count);
|
||||
} else {
|
||||
con->bad_page_cnt_threshold = tmp_threshold;
|
||||
con->bad_page_cnt_threshold = min_t(int, max_count,
|
||||
amdgpu_bad_page_threshold);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1962,15 +1960,24 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
|
||||
struct ras_err_handler_data **data;
|
||||
uint32_t max_eeprom_records_len = 0;
|
||||
u32 max_eeprom_records_count = 0;
|
||||
bool exc_err_limit = false;
|
||||
int ret;
|
||||
|
||||
if (adev->ras_enabled && con)
|
||||
data = &con->eh_data;
|
||||
else
|
||||
if (!con)
|
||||
return 0;
|
||||
|
||||
/* Allow access to RAS EEPROM via debugfs, when the ASIC
|
||||
* supports RAS and debugfs is enabled, but when
|
||||
* adev->ras_enabled is unset, i.e. when "ras_enable"
|
||||
* module parameter is set to 0.
|
||||
*/
|
||||
con->adev = adev;
|
||||
|
||||
if (!adev->ras_enabled)
|
||||
return 0;
|
||||
|
||||
data = &con->eh_data;
|
||||
*data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
|
||||
if (!*data) {
|
||||
ret = -ENOMEM;
|
||||
@@ -1980,10 +1987,9 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
|
||||
mutex_init(&con->recovery_lock);
|
||||
INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
|
||||
atomic_set(&con->in_recovery, 0);
|
||||
con->adev = adev;
|
||||
|
||||
max_eeprom_records_len = amdgpu_ras_eeprom_get_record_max_length();
|
||||
amdgpu_ras_validate_threshold(adev, max_eeprom_records_len);
|
||||
max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count();
|
||||
amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
|
||||
|
||||
/* Todo: During test the SMU might fail to read the eeprom through I2C
|
||||
* when the GPU is pending on XGMI reset during probe time
|
||||
@@ -1999,13 +2005,13 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
|
||||
if (exc_err_limit || ret)
|
||||
goto free;
|
||||
|
||||
if (con->eeprom_control.num_recs) {
|
||||
if (con->eeprom_control.ras_num_recs) {
|
||||
ret = amdgpu_ras_load_bad_pages(adev);
|
||||
if (ret)
|
||||
goto free;
|
||||
|
||||
if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
|
||||
adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.num_recs);
|
||||
adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -2015,7 +2021,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
|
||||
kfree(*data);
|
||||
con->eh_data = NULL;
|
||||
out:
|
||||
dev_warn(adev->dev, "Failed to initialize ras recovery!\n");
|
||||
dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
|
||||
|
||||
/*
|
||||
* Except error threshold exceeding case, other failure cases in this
|
||||
|
||||
@@ -49,10 +49,14 @@ enum amdgpu_ras_block {
|
||||
AMDGPU_RAS_BLOCK__MP0,
|
||||
AMDGPU_RAS_BLOCK__MP1,
|
||||
AMDGPU_RAS_BLOCK__FUSE,
|
||||
AMDGPU_RAS_BLOCK__MPIO,
|
||||
|
||||
AMDGPU_RAS_BLOCK__LAST
|
||||
};
|
||||
|
||||
extern const char *ras_block_string[];
|
||||
|
||||
#define ras_block_str(i) (ras_block_string[i])
|
||||
#define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST
|
||||
#define AMDGPU_RAS_BLOCK_MASK ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1)
|
||||
|
||||
@@ -306,7 +310,6 @@ struct ras_common_if {
|
||||
enum amdgpu_ras_block block;
|
||||
enum amdgpu_ras_error_type type;
|
||||
uint32_t sub_block_index;
|
||||
/* block name */
|
||||
char name[32];
|
||||
};
|
||||
|
||||
@@ -318,6 +321,7 @@ struct amdgpu_ras {
|
||||
/* sysfs */
|
||||
struct device_attribute features_attr;
|
||||
struct bin_attribute badpages_attr;
|
||||
struct dentry *de_ras_eeprom_table;
|
||||
/* block array */
|
||||
struct ras_manager *objs;
|
||||
|
||||
@@ -417,7 +421,7 @@ struct ras_badpage {
|
||||
/* interfaces for IP */
|
||||
struct ras_fs_if {
|
||||
struct ras_common_if head;
|
||||
char sysfs_name[32];
|
||||
const char* sysfs_name;
|
||||
char debugfs_name[32];
|
||||
};
|
||||
|
||||
@@ -469,8 +473,8 @@ struct ras_debug_if {
|
||||
* 8: feature disable
|
||||
*/
|
||||
|
||||
#define amdgpu_ras_get_context(adev) ((adev)->psp.ras.ras)
|
||||
#define amdgpu_ras_set_context(adev, ras_con) ((adev)->psp.ras.ras = (ras_con))
|
||||
#define amdgpu_ras_get_context(adev) ((adev)->psp.ras_context.ras)
|
||||
#define amdgpu_ras_set_context(adev, ras_con) ((adev)->psp.ras_context.ras = (ras_con))
|
||||
|
||||
/* check if ras is supported on block, say, sdma, gfx */
|
||||
static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -28,10 +28,11 @@
|
||||
|
||||
struct amdgpu_device;
|
||||
|
||||
enum amdgpu_ras_eeprom_err_type{
|
||||
AMDGPU_RAS_EEPROM_ERR_PLACE_HOLDER,
|
||||
enum amdgpu_ras_eeprom_err_type {
|
||||
AMDGPU_RAS_EEPROM_ERR_NA,
|
||||
AMDGPU_RAS_EEPROM_ERR_RECOVERABLE,
|
||||
AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE
|
||||
AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE,
|
||||
AMDGPU_RAS_EEPROM_ERR_COUNT,
|
||||
};
|
||||
|
||||
struct amdgpu_ras_eeprom_table_header {
|
||||
@@ -40,15 +41,45 @@ struct amdgpu_ras_eeprom_table_header {
|
||||
uint32_t first_rec_offset;
|
||||
uint32_t tbl_size;
|
||||
uint32_t checksum;
|
||||
}__attribute__((__packed__));
|
||||
} __packed;
|
||||
|
||||
struct amdgpu_ras_eeprom_control {
|
||||
struct amdgpu_ras_eeprom_table_header tbl_hdr;
|
||||
uint32_t next_addr;
|
||||
unsigned int num_recs;
|
||||
struct mutex tbl_mutex;
|
||||
uint32_t tbl_byte_sum;
|
||||
uint16_t i2c_address; // 8-bit represented address
|
||||
|
||||
/* Base I2C EEPPROM 19-bit memory address,
|
||||
* where the table is located. For more information,
|
||||
* see top of amdgpu_eeprom.c.
|
||||
*/
|
||||
u32 i2c_address;
|
||||
|
||||
/* The byte offset off of @i2c_address
|
||||
* where the table header is found,
|
||||
* and where the records start--always
|
||||
* right after the header.
|
||||
*/
|
||||
u32 ras_header_offset;
|
||||
u32 ras_record_offset;
|
||||
|
||||
/* Number of records in the table.
|
||||
*/
|
||||
u32 ras_num_recs;
|
||||
|
||||
/* First record index to read, 0-based.
|
||||
* Range is [0, num_recs-1]. This is
|
||||
* an absolute index, starting right after
|
||||
* the table header.
|
||||
*/
|
||||
u32 ras_fri;
|
||||
|
||||
/* Maximum possible number of records
|
||||
* we could store, i.e. the maximum capacity
|
||||
* of the table.
|
||||
*/
|
||||
u32 ras_max_record_count;
|
||||
|
||||
/* Protect table access via this mutex.
|
||||
*/
|
||||
struct mutex ras_tbl_mutex;
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -74,21 +105,26 @@ struct eeprom_table_record {
|
||||
|
||||
unsigned char mem_channel;
|
||||
unsigned char mcumc_id;
|
||||
}__attribute__((__packed__));
|
||||
} __packed;
|
||||
|
||||
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
|
||||
bool *exceed_err_limit);
|
||||
bool *exceed_err_limit);
|
||||
|
||||
int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
|
||||
|
||||
bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev);
|
||||
|
||||
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
|
||||
struct eeprom_table_record *records,
|
||||
bool write,
|
||||
int num);
|
||||
int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
|
||||
struct eeprom_table_record *records, const u32 num);
|
||||
|
||||
inline uint32_t amdgpu_ras_eeprom_get_record_max_length(void);
|
||||
int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
|
||||
struct eeprom_table_record *records, const u32 num);
|
||||
|
||||
void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control);
|
||||
inline uint32_t amdgpu_ras_eeprom_max_record_count(void);
|
||||
|
||||
void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
|
||||
|
||||
extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops;
|
||||
extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops;
|
||||
|
||||
#endif // _AMDGPU_RAS_EEPROM_H
|
||||
|
||||
@@ -48,6 +48,9 @@
|
||||
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
|
||||
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
|
||||
|
||||
/* fence flag bit to indicate the face is embedded in job*/
|
||||
#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT (DMA_FENCE_FLAG_USER_BITS + 1)
|
||||
|
||||
#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
|
||||
|
||||
#define AMDGPU_IB_POOL_SIZE (1024 * 1024)
|
||||
@@ -106,9 +109,6 @@ struct amdgpu_fence_driver {
|
||||
struct dma_fence **fences;
|
||||
};
|
||||
|
||||
int amdgpu_fence_driver_init(struct amdgpu_device *adev);
|
||||
void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev);
|
||||
void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev);
|
||||
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
|
||||
|
||||
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
|
||||
@@ -117,9 +117,11 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
|
||||
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
|
||||
struct amdgpu_irq_src *irq_src,
|
||||
unsigned irq_type);
|
||||
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
|
||||
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
|
||||
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
|
||||
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
|
||||
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
|
||||
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
|
||||
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
|
||||
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
|
||||
unsigned flags);
|
||||
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
|
||||
uint32_t timeout);
|
||||
|
||||
@@ -127,8 +127,8 @@ struct amdgpu_rlc_funcs {
|
||||
void (*reset)(struct amdgpu_device *adev);
|
||||
void (*start)(struct amdgpu_device *adev);
|
||||
void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
|
||||
void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 acc_flags, u32 hwip);
|
||||
u32 (*rlcg_rreg)(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip);
|
||||
void (*sriov_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 acc_flags, u32 hwip);
|
||||
u32 (*sriov_rreg)(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip);
|
||||
bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
|
||||
};
|
||||
|
||||
|
||||
@@ -105,7 +105,6 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
|
||||
adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA;
|
||||
adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
|
||||
adev->sdma.ras_if->sub_block_index = 0;
|
||||
strcpy(adev->sdma.ras_if->name, "sdma");
|
||||
}
|
||||
fs_info.head = ih_info->head = *adev->sdma.ras_if;
|
||||
|
||||
|
||||
@@ -80,7 +80,7 @@ void psp_securedisplay_parse_resp_status(struct psp_context *psp,
|
||||
void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
|
||||
enum ta_securedisplay_command command_id)
|
||||
{
|
||||
*cmd = (struct securedisplay_cmd *)psp->securedisplay_context.securedisplay_shared_buf;
|
||||
*cmd = (struct securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf;
|
||||
memset(*cmd, 0, sizeof(struct securedisplay_cmd));
|
||||
(*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE;
|
||||
(*cmd)->cmd_id = command_id;
|
||||
@@ -170,7 +170,7 @@ void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev)
|
||||
{
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
|
||||
if (!adev->psp.securedisplay_context.securedisplay_initialized)
|
||||
if (!adev->psp.securedisplay_context.context.initialized)
|
||||
return;
|
||||
|
||||
debugfs_create_file("securedisplay_test", S_IWUSR, adev_to_drm(adev)->primary->debugfs_root,
|
||||
|
||||
@@ -28,6 +28,8 @@
|
||||
* Christian König <christian.koenig@amd.com>
|
||||
*/
|
||||
|
||||
#include <linux/dma-fence-chain.h>
|
||||
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_trace.h"
|
||||
#include "amdgpu_amdkfd.h"
|
||||
@@ -186,6 +188,55 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
|
||||
return amdgpu_sync_fence(sync, fence);
|
||||
}
|
||||
|
||||
/* Determine based on the owner and mode if we should sync to a fence or not */
|
||||
static bool amdgpu_sync_test_fence(struct amdgpu_device *adev,
|
||||
enum amdgpu_sync_mode mode,
|
||||
void *owner, struct dma_fence *f)
|
||||
{
|
||||
void *fence_owner = amdgpu_sync_get_owner(f);
|
||||
|
||||
/* Always sync to moves, no matter what */
|
||||
if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED)
|
||||
return true;
|
||||
|
||||
/* We only want to trigger KFD eviction fences on
|
||||
* evict or move jobs. Skip KFD fences otherwise.
|
||||
*/
|
||||
if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
|
||||
owner != AMDGPU_FENCE_OWNER_UNDEFINED)
|
||||
return false;
|
||||
|
||||
/* Never sync to VM updates either. */
|
||||
if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
|
||||
owner != AMDGPU_FENCE_OWNER_UNDEFINED)
|
||||
return false;
|
||||
|
||||
/* Ignore fences depending on the sync mode */
|
||||
switch (mode) {
|
||||
case AMDGPU_SYNC_ALWAYS:
|
||||
return true;
|
||||
|
||||
case AMDGPU_SYNC_NE_OWNER:
|
||||
if (amdgpu_sync_same_dev(adev, f) &&
|
||||
fence_owner == owner)
|
||||
return false;
|
||||
break;
|
||||
|
||||
case AMDGPU_SYNC_EQ_OWNER:
|
||||
if (amdgpu_sync_same_dev(adev, f) &&
|
||||
fence_owner != owner)
|
||||
return false;
|
||||
break;
|
||||
|
||||
case AMDGPU_SYNC_EXPLICIT:
|
||||
return false;
|
||||
}
|
||||
|
||||
WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD,
|
||||
"Adding eviction fence to sync obj");
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_sync_resv - sync to a reservation object
|
||||
*
|
||||
@@ -211,67 +262,34 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
|
||||
|
||||
/* always sync to the exclusive fence */
|
||||
f = dma_resv_excl_fence(resv);
|
||||
r = amdgpu_sync_fence(sync, f);
|
||||
dma_fence_chain_for_each(f, f) {
|
||||
struct dma_fence_chain *chain = to_dma_fence_chain(f);
|
||||
|
||||
if (amdgpu_sync_test_fence(adev, mode, owner, chain ?
|
||||
chain->fence : f)) {
|
||||
r = amdgpu_sync_fence(sync, f);
|
||||
dma_fence_put(f);
|
||||
if (r)
|
||||
return r;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
flist = dma_resv_shared_list(resv);
|
||||
if (!flist || r)
|
||||
return r;
|
||||
if (!flist)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < flist->shared_count; ++i) {
|
||||
void *fence_owner;
|
||||
|
||||
f = rcu_dereference_protected(flist->shared[i],
|
||||
dma_resv_held(resv));
|
||||
|
||||
fence_owner = amdgpu_sync_get_owner(f);
|
||||
|
||||
/* Always sync to moves, no matter what */
|
||||
if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) {
|
||||
if (amdgpu_sync_test_fence(adev, mode, owner, f)) {
|
||||
r = amdgpu_sync_fence(sync, f);
|
||||
if (r)
|
||||
break;
|
||||
return r;
|
||||
}
|
||||
|
||||
/* We only want to trigger KFD eviction fences on
|
||||
* evict or move jobs. Skip KFD fences otherwise.
|
||||
*/
|
||||
if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
|
||||
owner != AMDGPU_FENCE_OWNER_UNDEFINED)
|
||||
continue;
|
||||
|
||||
/* Never sync to VM updates either. */
|
||||
if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
|
||||
owner != AMDGPU_FENCE_OWNER_UNDEFINED)
|
||||
continue;
|
||||
|
||||
/* Ignore fences depending on the sync mode */
|
||||
switch (mode) {
|
||||
case AMDGPU_SYNC_ALWAYS:
|
||||
break;
|
||||
|
||||
case AMDGPU_SYNC_NE_OWNER:
|
||||
if (amdgpu_sync_same_dev(adev, f) &&
|
||||
fence_owner == owner)
|
||||
continue;
|
||||
break;
|
||||
|
||||
case AMDGPU_SYNC_EQ_OWNER:
|
||||
if (amdgpu_sync_same_dev(adev, f) &&
|
||||
fence_owner != owner)
|
||||
continue;
|
||||
break;
|
||||
|
||||
case AMDGPU_SYNC_EXPLICIT:
|
||||
continue;
|
||||
}
|
||||
|
||||
WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD,
|
||||
"Adding eviction fence to sync obj");
|
||||
r = amdgpu_sync_fence(sync, f);
|
||||
if (r)
|
||||
break;
|
||||
}
|
||||
return r;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -149,14 +149,16 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
|
||||
* BOs to be evicted from VRAM
|
||||
*/
|
||||
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
|
||||
AMDGPU_GEM_DOMAIN_GTT);
|
||||
AMDGPU_GEM_DOMAIN_GTT |
|
||||
AMDGPU_GEM_DOMAIN_CPU);
|
||||
abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
|
||||
abo->placements[0].lpfn = 0;
|
||||
abo->placement.busy_placement = &abo->placements[1];
|
||||
abo->placement.num_busy_placement = 1;
|
||||
} else {
|
||||
/* Move to GTT memory */
|
||||
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
|
||||
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
|
||||
AMDGPU_GEM_DOMAIN_CPU);
|
||||
}
|
||||
break;
|
||||
case TTM_PL_TT:
|
||||
@@ -521,7 +523,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
|
||||
hop->fpfn = 0;
|
||||
hop->lpfn = 0;
|
||||
hop->mem_type = TTM_PL_TT;
|
||||
hop->flags = 0;
|
||||
hop->flags = TTM_PL_FLAG_TEMPORARY;
|
||||
return -EMULTIHOP;
|
||||
}
|
||||
|
||||
@@ -1121,7 +1123,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
|
||||
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
||||
|
||||
/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
|
||||
if (gtt && gtt->userptr) {
|
||||
if (gtt->userptr) {
|
||||
ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
|
||||
if (!ttm->sg)
|
||||
return -ENOMEM;
|
||||
@@ -1146,7 +1148,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
|
||||
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
||||
struct amdgpu_device *adev;
|
||||
|
||||
if (gtt && gtt->userptr) {
|
||||
if (gtt->userptr) {
|
||||
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
|
||||
kfree(ttm->sg);
|
||||
ttm->sg = NULL;
|
||||
@@ -1394,6 +1396,41 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
|
||||
return ttm_bo_eviction_valuable(bo, place);
|
||||
}
|
||||
|
||||
static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
|
||||
void *buf, size_t size, bool write)
|
||||
{
|
||||
while (size) {
|
||||
uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
|
||||
uint64_t bytes = 4 - (pos & 0x3);
|
||||
uint32_t shift = (pos & 0x3) * 8;
|
||||
uint32_t mask = 0xffffffff << shift;
|
||||
uint32_t value = 0;
|
||||
|
||||
if (size < bytes) {
|
||||
mask &= 0xffffffff >> (bytes - size) * 8;
|
||||
bytes = size;
|
||||
}
|
||||
|
||||
if (mask != 0xffffffff) {
|
||||
amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
|
||||
if (write) {
|
||||
value &= ~mask;
|
||||
value |= (*(uint32_t *)buf << shift) & mask;
|
||||
amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
|
||||
} else {
|
||||
value = (value & mask) >> shift;
|
||||
memcpy(buf, &value, bytes);
|
||||
}
|
||||
} else {
|
||||
amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
|
||||
}
|
||||
|
||||
pos += bytes;
|
||||
buf += bytes;
|
||||
size -= bytes;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
|
||||
*
|
||||
@@ -1413,8 +1450,6 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
|
||||
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
|
||||
struct amdgpu_res_cursor cursor;
|
||||
unsigned long flags;
|
||||
uint32_t value = 0;
|
||||
int ret = 0;
|
||||
|
||||
if (bo->resource->mem_type != TTM_PL_VRAM)
|
||||
@@ -1422,41 +1457,21 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
|
||||
|
||||
amdgpu_res_first(bo->resource, offset, len, &cursor);
|
||||
while (cursor.remaining) {
|
||||
uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
|
||||
uint64_t bytes = 4 - (cursor.start & 3);
|
||||
uint32_t shift = (cursor.start & 3) * 8;
|
||||
uint32_t mask = 0xffffffff << shift;
|
||||
size_t count, size = cursor.size;
|
||||
loff_t pos = cursor.start;
|
||||
|
||||
if (cursor.size < bytes) {
|
||||
mask &= 0xffffffff >> (bytes - cursor.size) * 8;
|
||||
bytes = cursor.size;
|
||||
count = amdgpu_device_aper_access(adev, pos, buf, size, write);
|
||||
size -= count;
|
||||
if (size) {
|
||||
/* using MM to access rest vram and handle un-aligned address */
|
||||
pos += count;
|
||||
buf += count;
|
||||
amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
|
||||
}
|
||||
|
||||
if (mask != 0xffffffff) {
|
||||
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
|
||||
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
|
||||
WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
|
||||
value = RREG32_NO_KIQ(mmMM_DATA);
|
||||
if (write) {
|
||||
value &= ~mask;
|
||||
value |= (*(uint32_t *)buf << shift) & mask;
|
||||
WREG32_NO_KIQ(mmMM_DATA, value);
|
||||
}
|
||||
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
|
||||
if (!write) {
|
||||
value = (value & mask) >> shift;
|
||||
memcpy(buf, &value, bytes);
|
||||
}
|
||||
} else {
|
||||
bytes = cursor.size & ~0x3ULL;
|
||||
amdgpu_device_vram_access(adev, cursor.start,
|
||||
(uint32_t *)buf, bytes,
|
||||
write);
|
||||
}
|
||||
|
||||
ret += bytes;
|
||||
buf = (uint8_t *)buf + bytes;
|
||||
amdgpu_res_next(&cursor, bytes);
|
||||
ret += cursor.size;
|
||||
buf += cursor.size;
|
||||
amdgpu_res_next(&cursor, cursor.size);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@@ -2146,7 +2161,6 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
|
||||
return -ENXIO;
|
||||
|
||||
while (size) {
|
||||
unsigned long flags;
|
||||
uint32_t value;
|
||||
|
||||
if (*pos >= adev->gmc.mc_vram_size)
|
||||
@@ -2156,11 +2170,7 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
|
||||
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
|
||||
WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
|
||||
WREG32_NO_KIQ(mmMM_DATA, value);
|
||||
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
|
||||
amdgpu_device_mm_access(adev, *pos, &value, 4, true);
|
||||
|
||||
result += 4;
|
||||
buf += 4;
|
||||
|
||||
@@ -52,7 +52,7 @@ struct amdgpu_gtt_mgr {
|
||||
struct ttm_resource_manager manager;
|
||||
struct drm_mm mm;
|
||||
spinlock_t lock;
|
||||
atomic64_t available;
|
||||
atomic64_t used;
|
||||
};
|
||||
|
||||
struct amdgpu_preempt_mgr {
|
||||
|
||||
@@ -409,6 +409,12 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
|
||||
return AMDGPU_FW_LOAD_DIRECT;
|
||||
else
|
||||
return AMDGPU_FW_LOAD_PSP;
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
if (!(load_type &&
|
||||
adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2))
|
||||
return AMDGPU_FW_LOAD_DIRECT;
|
||||
else
|
||||
return AMDGPU_FW_LOAD_PSP;
|
||||
default:
|
||||
DRM_ERROR("Unknown firmware load type\n");
|
||||
}
|
||||
@@ -416,6 +422,84 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
|
||||
return AMDGPU_FW_LOAD_DIRECT;
|
||||
}
|
||||
|
||||
const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
|
||||
{
|
||||
switch (ucode_id) {
|
||||
case AMDGPU_UCODE_ID_SDMA0:
|
||||
return "SDMA0";
|
||||
case AMDGPU_UCODE_ID_SDMA1:
|
||||
return "SDMA1";
|
||||
case AMDGPU_UCODE_ID_SDMA2:
|
||||
return "SDMA2";
|
||||
case AMDGPU_UCODE_ID_SDMA3:
|
||||
return "SDMA3";
|
||||
case AMDGPU_UCODE_ID_SDMA4:
|
||||
return "SDMA4";
|
||||
case AMDGPU_UCODE_ID_SDMA5:
|
||||
return "SDMA5";
|
||||
case AMDGPU_UCODE_ID_SDMA6:
|
||||
return "SDMA6";
|
||||
case AMDGPU_UCODE_ID_SDMA7:
|
||||
return "SDMA7";
|
||||
case AMDGPU_UCODE_ID_CP_CE:
|
||||
return "CP_CE";
|
||||
case AMDGPU_UCODE_ID_CP_PFP:
|
||||
return "CP_PFP";
|
||||
case AMDGPU_UCODE_ID_CP_ME:
|
||||
return "CP_ME";
|
||||
case AMDGPU_UCODE_ID_CP_MEC1:
|
||||
return "CP_MEC1";
|
||||
case AMDGPU_UCODE_ID_CP_MEC1_JT:
|
||||
return "CP_MEC1_JT";
|
||||
case AMDGPU_UCODE_ID_CP_MEC2:
|
||||
return "CP_MEC2";
|
||||
case AMDGPU_UCODE_ID_CP_MEC2_JT:
|
||||
return "CP_MEC2_JT";
|
||||
case AMDGPU_UCODE_ID_CP_MES:
|
||||
return "CP_MES";
|
||||
case AMDGPU_UCODE_ID_CP_MES_DATA:
|
||||
return "CP_MES_DATA";
|
||||
case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
|
||||
return "RLC_RESTORE_LIST_CNTL";
|
||||
case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
|
||||
return "RLC_RESTORE_LIST_GPM_MEM";
|
||||
case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
|
||||
return "RLC_RESTORE_LIST_SRM_MEM";
|
||||
case AMDGPU_UCODE_ID_RLC_IRAM:
|
||||
return "RLC_IRAM";
|
||||
case AMDGPU_UCODE_ID_RLC_DRAM:
|
||||
return "RLC_DRAM";
|
||||
case AMDGPU_UCODE_ID_RLC_G:
|
||||
return "RLC_G";
|
||||
case AMDGPU_UCODE_ID_STORAGE:
|
||||
return "STORAGE";
|
||||
case AMDGPU_UCODE_ID_SMC:
|
||||
return "SMC";
|
||||
case AMDGPU_UCODE_ID_UVD:
|
||||
return "UVD";
|
||||
case AMDGPU_UCODE_ID_UVD1:
|
||||
return "UVD1";
|
||||
case AMDGPU_UCODE_ID_VCE:
|
||||
return "VCE";
|
||||
case AMDGPU_UCODE_ID_VCN:
|
||||
return "VCN";
|
||||
case AMDGPU_UCODE_ID_VCN1:
|
||||
return "VCN1";
|
||||
case AMDGPU_UCODE_ID_DMCU_ERAM:
|
||||
return "DMCU_ERAM";
|
||||
case AMDGPU_UCODE_ID_DMCU_INTV:
|
||||
return "DMCU_INTV";
|
||||
case AMDGPU_UCODE_ID_VCN0_RAM:
|
||||
return "VCN0_RAM";
|
||||
case AMDGPU_UCODE_ID_VCN1_RAM:
|
||||
return "VCN1_RAM";
|
||||
case AMDGPU_UCODE_ID_DMCUB:
|
||||
return "DMCUB";
|
||||
default:
|
||||
return "UNKNOWN UCODE";
|
||||
}
|
||||
}
|
||||
|
||||
#define FW_VERSION_ATTR(name, mode, field) \
|
||||
static ssize_t show_##name(struct device *dev, \
|
||||
struct device_attribute *attr, \
|
||||
@@ -440,10 +524,10 @@ FW_VERSION_ATTR(rlc_srlg_fw_version, 0444, gfx.rlc_srlg_fw_version);
|
||||
FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
|
||||
FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
|
||||
FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
|
||||
FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos_fw_version);
|
||||
FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_fw_version);
|
||||
FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_ras_ucode_version);
|
||||
FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_xgmi_ucode_version);
|
||||
FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version);
|
||||
FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd.fw_version);
|
||||
FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras.feature_version);
|
||||
FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi.feature_version);
|
||||
FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
|
||||
FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
|
||||
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
|
||||
|
||||
@@ -71,7 +71,7 @@ struct smc_firmware_header_v2_1 {
|
||||
uint32_t pptable_entry_offset;
|
||||
};
|
||||
|
||||
struct psp_fw_bin_desc {
|
||||
struct psp_fw_legacy_bin_desc {
|
||||
uint32_t fw_version;
|
||||
uint32_t offset_bytes;
|
||||
uint32_t size_bytes;
|
||||
@@ -80,50 +80,67 @@ struct psp_fw_bin_desc {
|
||||
/* version_major=1, version_minor=0 */
|
||||
struct psp_firmware_header_v1_0 {
|
||||
struct common_firmware_header header;
|
||||
struct psp_fw_bin_desc sos;
|
||||
struct psp_fw_legacy_bin_desc sos;
|
||||
};
|
||||
|
||||
/* version_major=1, version_minor=1 */
|
||||
struct psp_firmware_header_v1_1 {
|
||||
struct psp_firmware_header_v1_0 v1_0;
|
||||
struct psp_fw_bin_desc toc;
|
||||
struct psp_fw_bin_desc kdb;
|
||||
struct psp_fw_legacy_bin_desc toc;
|
||||
struct psp_fw_legacy_bin_desc kdb;
|
||||
};
|
||||
|
||||
/* version_major=1, version_minor=2 */
|
||||
struct psp_firmware_header_v1_2 {
|
||||
struct psp_firmware_header_v1_0 v1_0;
|
||||
struct psp_fw_bin_desc res;
|
||||
struct psp_fw_bin_desc kdb;
|
||||
struct psp_fw_legacy_bin_desc res;
|
||||
struct psp_fw_legacy_bin_desc kdb;
|
||||
};
|
||||
|
||||
/* version_major=1, version_minor=3 */
|
||||
struct psp_firmware_header_v1_3 {
|
||||
struct psp_firmware_header_v1_1 v1_1;
|
||||
struct psp_fw_bin_desc spl;
|
||||
struct psp_fw_bin_desc rl;
|
||||
struct psp_fw_bin_desc sys_drv_aux;
|
||||
struct psp_fw_bin_desc sos_aux;
|
||||
struct psp_fw_legacy_bin_desc spl;
|
||||
struct psp_fw_legacy_bin_desc rl;
|
||||
struct psp_fw_legacy_bin_desc sys_drv_aux;
|
||||
struct psp_fw_legacy_bin_desc sos_aux;
|
||||
};
|
||||
|
||||
struct psp_fw_bin_desc {
|
||||
uint32_t fw_type;
|
||||
uint32_t fw_version;
|
||||
uint32_t offset_bytes;
|
||||
uint32_t size_bytes;
|
||||
};
|
||||
|
||||
enum psp_fw_type {
|
||||
PSP_FW_TYPE_UNKOWN,
|
||||
PSP_FW_TYPE_PSP_SOS,
|
||||
PSP_FW_TYPE_PSP_SYS_DRV,
|
||||
PSP_FW_TYPE_PSP_KDB,
|
||||
PSP_FW_TYPE_PSP_TOC,
|
||||
PSP_FW_TYPE_PSP_SPL,
|
||||
PSP_FW_TYPE_PSP_RL,
|
||||
PSP_FW_TYPE_PSP_SOC_DRV,
|
||||
PSP_FW_TYPE_PSP_INTF_DRV,
|
||||
PSP_FW_TYPE_PSP_DBG_DRV,
|
||||
};
|
||||
|
||||
/* version_major=2, version_minor=0 */
|
||||
struct psp_firmware_header_v2_0 {
|
||||
struct common_firmware_header header;
|
||||
uint32_t psp_fw_bin_count;
|
||||
struct psp_fw_bin_desc psp_fw_bin[];
|
||||
};
|
||||
|
||||
/* version_major=1, version_minor=0 */
|
||||
struct ta_firmware_header_v1_0 {
|
||||
struct common_firmware_header header;
|
||||
uint32_t ta_xgmi_ucode_version;
|
||||
uint32_t ta_xgmi_offset_bytes;
|
||||
uint32_t ta_xgmi_size_bytes;
|
||||
uint32_t ta_ras_ucode_version;
|
||||
uint32_t ta_ras_offset_bytes;
|
||||
uint32_t ta_ras_size_bytes;
|
||||
uint32_t ta_hdcp_ucode_version;
|
||||
uint32_t ta_hdcp_offset_bytes;
|
||||
uint32_t ta_hdcp_size_bytes;
|
||||
uint32_t ta_dtm_ucode_version;
|
||||
uint32_t ta_dtm_offset_bytes;
|
||||
uint32_t ta_dtm_size_bytes;
|
||||
uint32_t ta_securedisplay_ucode_version;
|
||||
uint32_t ta_securedisplay_offset_bytes;
|
||||
uint32_t ta_securedisplay_size_bytes;
|
||||
struct psp_fw_legacy_bin_desc xgmi;
|
||||
struct psp_fw_legacy_bin_desc ras;
|
||||
struct psp_fw_legacy_bin_desc hdcp;
|
||||
struct psp_fw_legacy_bin_desc dtm;
|
||||
struct psp_fw_legacy_bin_desc securedisplay;
|
||||
};
|
||||
|
||||
enum ta_fw_type {
|
||||
@@ -138,18 +155,11 @@ enum ta_fw_type {
|
||||
TA_FW_TYPE_MAX_INDEX,
|
||||
};
|
||||
|
||||
struct ta_fw_bin_desc {
|
||||
uint32_t fw_type;
|
||||
uint32_t fw_version;
|
||||
uint32_t offset_bytes;
|
||||
uint32_t size_bytes;
|
||||
};
|
||||
|
||||
/* version_major=2, version_minor=0 */
|
||||
struct ta_firmware_header_v2_0 {
|
||||
struct common_firmware_header header;
|
||||
uint32_t ta_fw_bin_count;
|
||||
struct ta_fw_bin_desc ta_fw_bin[];
|
||||
struct psp_fw_bin_desc ta_fw_bin[];
|
||||
};
|
||||
|
||||
/* version_major=1, version_minor=0 */
|
||||
@@ -312,6 +322,7 @@ union amdgpu_firmware_header {
|
||||
struct psp_firmware_header_v1_0 psp;
|
||||
struct psp_firmware_header_v1_1 psp_v1_1;
|
||||
struct psp_firmware_header_v1_3 psp_v1_3;
|
||||
struct psp_firmware_header_v2_0 psp_v2_0;
|
||||
struct ta_firmware_header_v1_0 ta;
|
||||
struct ta_firmware_header_v2_0 ta_v2_0;
|
||||
struct gfx_firmware_header_v1_0 gfx;
|
||||
@@ -326,7 +337,7 @@ union amdgpu_firmware_header {
|
||||
uint8_t raw[0x100];
|
||||
};
|
||||
|
||||
#define UCODE_MAX_TA_PACKAGING ((sizeof(union amdgpu_firmware_header) - sizeof(struct common_firmware_header) - 4) / sizeof(struct ta_fw_bin_desc))
|
||||
#define UCODE_MAX_PSP_PACKAGING ((sizeof(union amdgpu_firmware_header) - sizeof(struct common_firmware_header) - 4) / sizeof(struct psp_fw_bin_desc))
|
||||
|
||||
/*
|
||||
* fw loading support
|
||||
@@ -449,4 +460,6 @@ void amdgpu_ucode_sysfs_fini(struct amdgpu_device *adev);
|
||||
enum amdgpu_firmware_load_type
|
||||
amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
|
||||
|
||||
const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -41,7 +41,6 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
|
||||
adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC;
|
||||
adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
|
||||
adev->umc.ras_if->sub_block_index = 0;
|
||||
strcpy(adev->umc.ras_if->name, "umc");
|
||||
}
|
||||
ih_info.head = fs_info.head = *adev->umc.ras_if;
|
||||
|
||||
@@ -134,7 +133,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
|
||||
amdgpu_ras_save_bad_pages(adev);
|
||||
|
||||
if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
|
||||
adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.num_recs);
|
||||
adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
|
||||
}
|
||||
|
||||
amdgpu_ras_reset_gpu(adev);
|
||||
|
||||
@@ -326,7 +326,6 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
cancel_delayed_work_sync(&adev->uvd.idle_work);
|
||||
drm_sched_entity_destroy(&adev->uvd.entity);
|
||||
|
||||
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
|
||||
|
||||
@@ -218,7 +218,6 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
|
||||
if (adev->vce.vcpu_bo == NULL)
|
||||
return 0;
|
||||
|
||||
cancel_delayed_work_sync(&adev->vce.idle_work);
|
||||
drm_sched_entity_destroy(&adev->vce.entity);
|
||||
|
||||
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
|
||||
|
||||
@@ -258,8 +258,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
cancel_delayed_work_sync(&adev->vcn.idle_work);
|
||||
|
||||
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
|
||||
if (adev->vcn.harvest_config & (1 << j))
|
||||
continue;
|
||||
|
||||
@@ -531,10 +531,10 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
|
||||
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
|
||||
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
|
||||
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
|
||||
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos_fw_version);
|
||||
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd_fw_version);
|
||||
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, adev->psp.ta_ras_ucode_version);
|
||||
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, adev->psp.ta_xgmi_ucode_version);
|
||||
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
|
||||
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd.fw_version);
|
||||
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, adev->psp.ras.feature_version);
|
||||
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, adev->psp.xgmi.feature_version);
|
||||
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
|
||||
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
|
||||
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
|
||||
|
||||
643
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
Normal file
643
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
Normal file
@@ -0,0 +1,643 @@
|
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_simple_kms_helper.h>
|
||||
#include <drm/drm_vblank.h>
|
||||
|
||||
#include "amdgpu.h"
|
||||
#ifdef CONFIG_DRM_AMDGPU_SI
|
||||
#include "dce_v6_0.h"
|
||||
#endif
|
||||
#ifdef CONFIG_DRM_AMDGPU_CIK
|
||||
#include "dce_v8_0.h"
|
||||
#endif
|
||||
#include "dce_v10_0.h"
|
||||
#include "dce_v11_0.h"
|
||||
#include "ivsrcid/ivsrcid_vislands30.h"
|
||||
#include "amdgpu_vkms.h"
|
||||
#include "amdgpu_display.h"
|
||||
|
||||
/**
|
||||
* DOC: amdgpu_vkms
|
||||
*
|
||||
* The amdgpu vkms interface provides a virtual KMS interface for several use
|
||||
* cases: devices without display hardware, platforms where the actual display
|
||||
* hardware is not useful (e.g., servers), SR-IOV virtual functions, device
|
||||
* emulation/simulation, and device bring up prior to display hardware being
|
||||
* usable. We previously emulated a legacy KMS interface, but there was a desire
|
||||
* to move to the atomic KMS interface. The vkms driver did everything we
|
||||
* needed, but we wanted KMS support natively in the driver without buffer
|
||||
* sharing and the ability to support an instance of VKMS per device. We first
|
||||
* looked at splitting vkms into a stub driver and a helper module that other
|
||||
* drivers could use to implement a virtual display, but this strategy ended up
|
||||
* being messy due to driver specific callbacks needed for buffer management.
|
||||
* Ultimately, it proved easier to import the vkms code as it mostly used core
|
||||
* drm helpers anyway.
|
||||
*/
|
||||
|
||||
static const u32 amdgpu_vkms_formats[] = {
|
||||
DRM_FORMAT_XRGB8888,
|
||||
};
|
||||
|
||||
static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
|
||||
{
|
||||
struct amdgpu_vkms_output *output = container_of(timer,
|
||||
struct amdgpu_vkms_output,
|
||||
vblank_hrtimer);
|
||||
struct drm_crtc *crtc = &output->crtc;
|
||||
u64 ret_overrun;
|
||||
bool ret;
|
||||
|
||||
ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
|
||||
output->period_ns);
|
||||
WARN_ON(ret_overrun != 1);
|
||||
|
||||
ret = drm_crtc_handle_vblank(crtc);
|
||||
if (!ret)
|
||||
DRM_ERROR("amdgpu_vkms failure on handling vblank");
|
||||
|
||||
return HRTIMER_RESTART;
|
||||
}
|
||||
|
||||
static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
unsigned int pipe = drm_crtc_index(crtc);
|
||||
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
|
||||
struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
|
||||
|
||||
drm_calc_timestamping_constants(crtc, &crtc->mode);
|
||||
|
||||
hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
||||
out->vblank_hrtimer.function = &amdgpu_vkms_vblank_simulate;
|
||||
out->period_ns = ktime_set(0, vblank->framedur_ns);
|
||||
hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
|
||||
{
|
||||
struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
|
||||
|
||||
hrtimer_cancel(&out->vblank_hrtimer);
|
||||
}
|
||||
|
||||
static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
|
||||
int *max_error,
|
||||
ktime_t *vblank_time,
|
||||
bool in_vblank_irq)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
unsigned int pipe = crtc->index;
|
||||
struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
|
||||
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
|
||||
|
||||
if (!READ_ONCE(vblank->enabled)) {
|
||||
*vblank_time = ktime_get();
|
||||
return true;
|
||||
}
|
||||
|
||||
*vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);
|
||||
|
||||
if (WARN_ON(*vblank_time == vblank->time))
|
||||
return true;
|
||||
|
||||
/*
|
||||
* To prevent races we roll the hrtimer forward before we do any
|
||||
* interrupt processing - this is how real hw works (the interrupt is
|
||||
* only generated after all the vblank registers are updated) and what
|
||||
* the vblank core expects. Therefore we need to always correct the
|
||||
* timestampe by one frame.
|
||||
*/
|
||||
*vblank_time -= output->period_ns;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static const struct drm_crtc_funcs amdgpu_vkms_crtc_funcs = {
|
||||
.set_config = drm_atomic_helper_set_config,
|
||||
.destroy = drm_crtc_cleanup,
|
||||
.page_flip = drm_atomic_helper_page_flip,
|
||||
.reset = drm_atomic_helper_crtc_reset,
|
||||
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
|
||||
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
|
||||
.enable_vblank = amdgpu_vkms_enable_vblank,
|
||||
.disable_vblank = amdgpu_vkms_disable_vblank,
|
||||
.get_vblank_timestamp = amdgpu_vkms_get_vblank_timestamp,
|
||||
};
|
||||
|
||||
static void amdgpu_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
drm_crtc_vblank_on(crtc);
|
||||
}
|
||||
|
||||
static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
drm_crtc_vblank_off(crtc);
|
||||
}
|
||||
|
||||
static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
if (crtc->state->event) {
|
||||
spin_lock(&crtc->dev->event_lock);
|
||||
|
||||
if (drm_crtc_vblank_get(crtc) != 0)
|
||||
drm_crtc_send_vblank_event(crtc, crtc->state->event);
|
||||
else
|
||||
drm_crtc_arm_vblank_event(crtc, crtc->state->event);
|
||||
|
||||
spin_unlock(&crtc->dev->event_lock);
|
||||
|
||||
crtc->state->event = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = {
|
||||
.atomic_flush = amdgpu_vkms_crtc_atomic_flush,
|
||||
.atomic_enable = amdgpu_vkms_crtc_atomic_enable,
|
||||
.atomic_disable = amdgpu_vkms_crtc_atomic_disable,
|
||||
};
|
||||
|
||||
static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
|
||||
struct drm_plane *primary, struct drm_plane *cursor)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
|
||||
&amdgpu_vkms_crtc_funcs, NULL);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to init CRTC\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct drm_connector_funcs amdgpu_vkms_connector_funcs = {
|
||||
.fill_modes = drm_helper_probe_single_connector_modes,
|
||||
.destroy = drm_connector_cleanup,
|
||||
.reset = drm_atomic_helper_connector_reset,
|
||||
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
|
||||
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
|
||||
};
|
||||
|
||||
static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct drm_display_mode *mode = NULL;
|
||||
unsigned i;
|
||||
static const struct mode_size {
|
||||
int w;
|
||||
int h;
|
||||
} common_modes[] = {
|
||||
{ 640, 480},
|
||||
{ 720, 480},
|
||||
{ 800, 600},
|
||||
{ 848, 480},
|
||||
{1024, 768},
|
||||
{1152, 768},
|
||||
{1280, 720},
|
||||
{1280, 800},
|
||||
{1280, 854},
|
||||
{1280, 960},
|
||||
{1280, 1024},
|
||||
{1440, 900},
|
||||
{1400, 1050},
|
||||
{1680, 1050},
|
||||
{1600, 1200},
|
||||
{1920, 1080},
|
||||
{1920, 1200},
|
||||
{2560, 1440},
|
||||
{4096, 3112},
|
||||
{3656, 2664},
|
||||
{3840, 2160},
|
||||
{4096, 2160},
|
||||
};
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
|
||||
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
|
||||
drm_mode_probed_add(connector, mode);
|
||||
}
|
||||
|
||||
drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
|
||||
|
||||
return ARRAY_SIZE(common_modes);
|
||||
}
|
||||
|
||||
static const struct drm_connector_helper_funcs amdgpu_vkms_conn_helper_funcs = {
|
||||
.get_modes = amdgpu_vkms_conn_get_modes,
|
||||
};
|
||||
|
||||
static const struct drm_plane_funcs amdgpu_vkms_plane_funcs = {
|
||||
.update_plane = drm_atomic_helper_update_plane,
|
||||
.disable_plane = drm_atomic_helper_disable_plane,
|
||||
.destroy = drm_plane_cleanup,
|
||||
.reset = drm_atomic_helper_plane_reset,
|
||||
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
|
||||
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
|
||||
};
|
||||
|
||||
static void amdgpu_vkms_plane_atomic_update(struct drm_plane *plane,
|
||||
struct drm_atomic_state *old_state)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
static int amdgpu_vkms_plane_atomic_check(struct drm_plane *plane,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
|
||||
plane);
|
||||
struct drm_crtc_state *crtc_state;
|
||||
int ret;
|
||||
|
||||
if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
|
||||
return 0;
|
||||
|
||||
crtc_state = drm_atomic_get_crtc_state(state,
|
||||
new_plane_state->crtc);
|
||||
if (IS_ERR(crtc_state))
|
||||
return PTR_ERR(crtc_state);
|
||||
|
||||
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
|
||||
DRM_PLANE_HELPER_NO_SCALING,
|
||||
DRM_PLANE_HELPER_NO_SCALING,
|
||||
false, true);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
|
||||
/* for now primary plane must be visible and full screen */
|
||||
if (!new_plane_state->visible)
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
|
||||
struct drm_plane_state *new_state)
|
||||
{
|
||||
struct amdgpu_framebuffer *afb;
|
||||
struct drm_gem_object *obj;
|
||||
struct amdgpu_device *adev;
|
||||
struct amdgpu_bo *rbo;
|
||||
struct list_head list;
|
||||
struct ttm_validate_buffer tv;
|
||||
struct ww_acquire_ctx ticket;
|
||||
uint32_t domain;
|
||||
int r;
|
||||
|
||||
if (!new_state->fb) {
|
||||
DRM_DEBUG_KMS("No FB bound\n");
|
||||
return 0;
|
||||
}
|
||||
afb = to_amdgpu_framebuffer(new_state->fb);
|
||||
obj = new_state->fb->obj[0];
|
||||
rbo = gem_to_amdgpu_bo(obj);
|
||||
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
|
||||
INIT_LIST_HEAD(&list);
|
||||
|
||||
tv.bo = &rbo->tbo;
|
||||
tv.num_shared = 1;
|
||||
list_add(&tv.head, &list);
|
||||
|
||||
r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
if (plane->type != DRM_PLANE_TYPE_CURSOR)
|
||||
domain = amdgpu_display_supported_domains(adev, rbo->flags);
|
||||
else
|
||||
domain = AMDGPU_GEM_DOMAIN_VRAM;
|
||||
|
||||
r = amdgpu_bo_pin(rbo, domain);
|
||||
if (unlikely(r != 0)) {
|
||||
if (r != -ERESTARTSYS)
|
||||
DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
|
||||
ttm_eu_backoff_reservation(&ticket, &list);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_ttm_alloc_gart(&rbo->tbo);
|
||||
if (unlikely(r != 0)) {
|
||||
amdgpu_bo_unpin(rbo);
|
||||
ttm_eu_backoff_reservation(&ticket, &list);
|
||||
DRM_ERROR("%p bind failed\n", rbo);
|
||||
return r;
|
||||
}
|
||||
|
||||
ttm_eu_backoff_reservation(&ticket, &list);
|
||||
|
||||
afb->address = amdgpu_bo_gpu_offset(rbo);
|
||||
|
||||
amdgpu_bo_ref(rbo);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
|
||||
struct drm_plane_state *old_state)
|
||||
{
|
||||
struct amdgpu_bo *rbo;
|
||||
int r;
|
||||
|
||||
if (!old_state->fb)
|
||||
return;
|
||||
|
||||
rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
|
||||
r = amdgpu_bo_reserve(rbo, false);
|
||||
if (unlikely(r)) {
|
||||
DRM_ERROR("failed to reserve rbo before unpin\n");
|
||||
return;
|
||||
}
|
||||
|
||||
amdgpu_bo_unpin(rbo);
|
||||
amdgpu_bo_unreserve(rbo);
|
||||
amdgpu_bo_unref(&rbo);
|
||||
}
|
||||
|
||||
static const struct drm_plane_helper_funcs amdgpu_vkms_primary_helper_funcs = {
|
||||
.atomic_update = amdgpu_vkms_plane_atomic_update,
|
||||
.atomic_check = amdgpu_vkms_plane_atomic_check,
|
||||
.prepare_fb = amdgpu_vkms_prepare_fb,
|
||||
.cleanup_fb = amdgpu_vkms_cleanup_fb,
|
||||
};
|
||||
|
||||
static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
|
||||
enum drm_plane_type type,
|
||||
int index)
|
||||
{
|
||||
struct drm_plane *plane;
|
||||
int ret;
|
||||
|
||||
plane = kzalloc(sizeof(*plane), GFP_KERNEL);
|
||||
if (!plane)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ret = drm_universal_plane_init(dev, plane, 1 << index,
|
||||
&amdgpu_vkms_plane_funcs,
|
||||
amdgpu_vkms_formats,
|
||||
ARRAY_SIZE(amdgpu_vkms_formats),
|
||||
NULL, type, NULL);
|
||||
if (ret) {
|
||||
kfree(plane);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
drm_plane_helper_add(plane, &amdgpu_vkms_primary_helper_funcs);
|
||||
|
||||
return plane;
|
||||
}
|
||||
|
||||
int amdgpu_vkms_output_init(struct drm_device *dev,
|
||||
struct amdgpu_vkms_output *output, int index)
|
||||
{
|
||||
struct drm_connector *connector = &output->connector;
|
||||
struct drm_encoder *encoder = &output->encoder;
|
||||
struct drm_crtc *crtc = &output->crtc;
|
||||
struct drm_plane *primary, *cursor = NULL;
|
||||
int ret;
|
||||
|
||||
primary = amdgpu_vkms_plane_init(dev, DRM_PLANE_TYPE_PRIMARY, index);
|
||||
if (IS_ERR(primary))
|
||||
return PTR_ERR(primary);
|
||||
|
||||
ret = amdgpu_vkms_crtc_init(dev, crtc, primary, cursor);
|
||||
if (ret)
|
||||
goto err_crtc;
|
||||
|
||||
ret = drm_connector_init(dev, connector, &amdgpu_vkms_connector_funcs,
|
||||
DRM_MODE_CONNECTOR_VIRTUAL);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to init connector\n");
|
||||
goto err_connector;
|
||||
}
|
||||
|
||||
drm_connector_helper_add(connector, &amdgpu_vkms_conn_helper_funcs);
|
||||
|
||||
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to init encoder\n");
|
||||
goto err_encoder;
|
||||
}
|
||||
encoder->possible_crtcs = 1 << index;
|
||||
|
||||
ret = drm_connector_attach_encoder(connector, encoder);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to attach connector to encoder\n");
|
||||
goto err_attach;
|
||||
}
|
||||
|
||||
drm_mode_config_reset(dev);
|
||||
|
||||
return 0;
|
||||
|
||||
err_attach:
|
||||
drm_encoder_cleanup(encoder);
|
||||
|
||||
err_encoder:
|
||||
drm_connector_cleanup(connector);
|
||||
|
||||
err_connector:
|
||||
drm_crtc_cleanup(crtc);
|
||||
|
||||
err_crtc:
|
||||
drm_plane_cleanup(primary);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = {
|
||||
.fb_create = amdgpu_display_user_framebuffer_create,
|
||||
.atomic_check = drm_atomic_helper_check,
|
||||
.atomic_commit = drm_atomic_helper_commit,
|
||||
};
|
||||
|
||||
static int amdgpu_vkms_sw_init(void *handle)
|
||||
{
|
||||
int r, i;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
adev_to_drm(adev)->max_vblank_count = 0;
|
||||
|
||||
adev_to_drm(adev)->mode_config.funcs = &amdgpu_vkms_mode_funcs;
|
||||
|
||||
adev_to_drm(adev)->mode_config.max_width = XRES_MAX;
|
||||
adev_to_drm(adev)->mode_config.max_height = YRES_MAX;
|
||||
|
||||
adev_to_drm(adev)->mode_config.preferred_depth = 24;
|
||||
adev_to_drm(adev)->mode_config.prefer_shadow = 1;
|
||||
|
||||
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
|
||||
|
||||
r = amdgpu_display_modeset_create_props(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc, sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
|
||||
if (!adev->amdgpu_vkms_output)
|
||||
return -ENOMEM;
|
||||
|
||||
/* allocate crtcs, encoders, connectors */
|
||||
for (i = 0; i < adev->mode_info.num_crtc; i++) {
|
||||
r = amdgpu_vkms_output_init(adev_to_drm(adev), &adev->amdgpu_vkms_output[i], i);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
drm_kms_helper_poll_init(adev_to_drm(adev));
|
||||
|
||||
adev->mode_info.mode_config_initialized = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_vkms_sw_fini(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int i = 0;
|
||||
|
||||
for (i = 0; i < adev->mode_info.num_crtc; i++)
|
||||
if (adev->mode_info.crtcs[i])
|
||||
hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
|
||||
|
||||
kfree(adev->mode_info.bios_hardcoded_edid);
|
||||
kfree(adev->amdgpu_vkms_output);
|
||||
|
||||
drm_kms_helper_poll_fini(adev_to_drm(adev));
|
||||
|
||||
adev->mode_info.mode_config_initialized = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_vkms_hw_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
switch (adev->asic_type) {
|
||||
#ifdef CONFIG_DRM_AMDGPU_SI
|
||||
case CHIP_TAHITI:
|
||||
case CHIP_PITCAIRN:
|
||||
case CHIP_VERDE:
|
||||
case CHIP_OLAND:
|
||||
dce_v6_0_disable_dce(adev);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_DRM_AMDGPU_CIK
|
||||
case CHIP_BONAIRE:
|
||||
case CHIP_HAWAII:
|
||||
case CHIP_KAVERI:
|
||||
case CHIP_KABINI:
|
||||
case CHIP_MULLINS:
|
||||
dce_v8_0_disable_dce(adev);
|
||||
break;
|
||||
#endif
|
||||
case CHIP_FIJI:
|
||||
case CHIP_TONGA:
|
||||
dce_v10_0_disable_dce(adev);
|
||||
break;
|
||||
case CHIP_CARRIZO:
|
||||
case CHIP_STONEY:
|
||||
case CHIP_POLARIS10:
|
||||
case CHIP_POLARIS11:
|
||||
case CHIP_VEGAM:
|
||||
dce_v11_0_disable_dce(adev);
|
||||
break;
|
||||
case CHIP_TOPAZ:
|
||||
#ifdef CONFIG_DRM_AMDGPU_SI
|
||||
case CHIP_HAINAN:
|
||||
#endif
|
||||
/* no DCE */
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_vkms_hw_fini(void *handle)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_vkms_suspend(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int r;
|
||||
|
||||
r = drm_mode_config_helper_suspend(adev_to_drm(adev));
|
||||
if (r)
|
||||
return r;
|
||||
return amdgpu_vkms_hw_fini(handle);
|
||||
}
|
||||
|
||||
static int amdgpu_vkms_resume(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int r;
|
||||
|
||||
r = amdgpu_vkms_hw_init(handle);
|
||||
if (r)
|
||||
return r;
|
||||
return drm_mode_config_helper_resume(adev_to_drm(adev));
|
||||
}
|
||||
|
||||
static bool amdgpu_vkms_is_idle(void *handle)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static int amdgpu_vkms_wait_for_idle(void *handle)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_vkms_soft_reset(void *handle)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_vkms_set_clockgating_state(void *handle,
|
||||
enum amd_clockgating_state state)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_vkms_set_powergating_state(void *handle,
|
||||
enum amd_powergating_state state)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
|
||||
.name = "amdgpu_vkms",
|
||||
.early_init = NULL,
|
||||
.late_init = NULL,
|
||||
.sw_init = amdgpu_vkms_sw_init,
|
||||
.sw_fini = amdgpu_vkms_sw_fini,
|
||||
.hw_init = amdgpu_vkms_hw_init,
|
||||
.hw_fini = amdgpu_vkms_hw_fini,
|
||||
.suspend = amdgpu_vkms_suspend,
|
||||
.resume = amdgpu_vkms_resume,
|
||||
.is_idle = amdgpu_vkms_is_idle,
|
||||
.wait_for_idle = amdgpu_vkms_wait_for_idle,
|
||||
.soft_reset = amdgpu_vkms_soft_reset,
|
||||
.set_clockgating_state = amdgpu_vkms_set_clockgating_state,
|
||||
.set_powergating_state = amdgpu_vkms_set_powergating_state,
|
||||
};
|
||||
|
||||
const struct amdgpu_ip_block_version amdgpu_vkms_ip_block =
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_DCE,
|
||||
.major = 1,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &amdgpu_vkms_ip_funcs,
|
||||
};
|
||||
|
||||
26
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h
Normal file
26
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h
Normal file
@@ -0,0 +1,26 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0+ */
|
||||
|
||||
#ifndef _AMDGPU_VKMS_H_
|
||||
#define _AMDGPU_VKMS_H_
|
||||
|
||||
#define XRES_DEF 1024
|
||||
#define YRES_DEF 768
|
||||
|
||||
#define XRES_MAX 16384
|
||||
#define YRES_MAX 16384
|
||||
|
||||
#define drm_crtc_to_amdgpu_vkms_output(target) \
|
||||
container_of(target, struct amdgpu_vkms_output, crtc)
|
||||
|
||||
extern const struct amdgpu_ip_block_version amdgpu_vkms_ip_block;
|
||||
|
||||
struct amdgpu_vkms_output {
|
||||
struct drm_crtc crtc;
|
||||
struct drm_encoder encoder;
|
||||
struct drm_connector connector;
|
||||
struct hrtimer vblank_hrtimer;
|
||||
ktime_t period_ns;
|
||||
struct drm_pending_vblank_event *event;
|
||||
};
|
||||
|
||||
#endif /* _AMDGPU_VKMS_H_ */
|
||||
@@ -88,6 +88,46 @@ struct amdgpu_prt_cb {
|
||||
struct dma_fence_cb cb;
|
||||
};
|
||||
|
||||
/**
|
||||
* amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @vm: amdgpu_vm pointer
|
||||
* @pasid: the pasid the VM is using on this GPU
|
||||
*
|
||||
* Set the pasid this VM is using on this GPU, can also be used to remove the
|
||||
* pasid by passing in zero.
|
||||
*
|
||||
*/
|
||||
int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
u32 pasid)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (vm->pasid == pasid)
|
||||
return 0;
|
||||
|
||||
if (vm->pasid) {
|
||||
r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
vm->pasid = 0;
|
||||
}
|
||||
|
||||
if (pasid) {
|
||||
r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
|
||||
GFP_KERNEL));
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
vm->pasid = pasid;
|
||||
}
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
|
||||
* happens while holding this lock anywhere to prevent deadlocks when
|
||||
@@ -886,7 +926,7 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
|
||||
bp.size = amdgpu_vm_bo_size(adev, level);
|
||||
bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
|
||||
bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
|
||||
bp.domain = amdgpu_bo_get_preferred_pin_domain(adev, bp.domain);
|
||||
bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
|
||||
bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
|
||||
AMDGPU_GEM_CREATE_CPU_GTT_USWC;
|
||||
|
||||
@@ -1178,7 +1218,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
|
||||
amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
|
||||
|
||||
if (vm_flush_needed || pasid_mapping_needed) {
|
||||
r = amdgpu_fence_emit(ring, &fence, 0);
|
||||
r = amdgpu_fence_emit(ring, &fence, NULL, 0);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
@@ -1758,7 +1798,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
|
||||
r = vm->update_funcs->commit(¶ms, fence);
|
||||
|
||||
if (table_freed)
|
||||
*table_freed = params.table_freed;
|
||||
*table_freed = *table_freed || params.table_freed;
|
||||
|
||||
error_unlock:
|
||||
amdgpu_vm_eviction_unlock(vm);
|
||||
@@ -1816,6 +1856,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
|
||||
* @adev: amdgpu_device pointer
|
||||
* @bo_va: requested BO and VM object
|
||||
* @clear: if true clear the entries
|
||||
* @table_freed: return true if page table is freed
|
||||
*
|
||||
* Fill in the page table entries for @bo_va.
|
||||
*
|
||||
@@ -1823,7 +1864,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
|
||||
* 0 for success, -EINVAL for failure.
|
||||
*/
|
||||
int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
|
||||
bool clear)
|
||||
bool clear, bool *table_freed)
|
||||
{
|
||||
struct amdgpu_bo *bo = bo_va->base.bo;
|
||||
struct amdgpu_vm *vm = bo_va->base.vm;
|
||||
@@ -1902,7 +1943,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
|
||||
resv, mapping->start,
|
||||
mapping->last, update_flags,
|
||||
mapping->offset, mem,
|
||||
pages_addr, last_update, NULL);
|
||||
pages_addr, last_update, table_freed);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
@@ -2154,7 +2195,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
|
||||
|
||||
list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
|
||||
/* Per VM BOs never need to bo cleared in the page tables */
|
||||
r = amdgpu_vm_bo_update(adev, bo_va, false);
|
||||
r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
@@ -2173,7 +2214,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
|
||||
else
|
||||
clear = true;
|
||||
|
||||
r = amdgpu_vm_bo_update(adev, bo_va, clear);
|
||||
r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
@@ -2863,14 +2904,13 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @vm: requested vm
|
||||
* @pasid: Process address space identifier
|
||||
*
|
||||
* Init @vm fields.
|
||||
*
|
||||
* Returns:
|
||||
* 0 for success, error for failure.
|
||||
*/
|
||||
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
|
||||
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
||||
{
|
||||
struct amdgpu_bo *root_bo;
|
||||
struct amdgpu_bo_vm *root;
|
||||
@@ -2944,19 +2984,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
|
||||
|
||||
amdgpu_bo_unreserve(vm->root.bo);
|
||||
|
||||
if (pasid) {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
||||
r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
|
||||
GFP_ATOMIC);
|
||||
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
||||
if (r < 0)
|
||||
goto error_free_root;
|
||||
|
||||
vm->pasid = pasid;
|
||||
}
|
||||
|
||||
INIT_KFIFO(vm->faults);
|
||||
|
||||
return 0;
|
||||
@@ -3012,7 +3039,6 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @vm: requested vm
|
||||
* @pasid: pasid to use
|
||||
*
|
||||
* This only works on GFX VMs that don't have any BOs added and no
|
||||
* page tables allocated yet.
|
||||
@@ -3020,7 +3046,6 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
|
||||
* Changes the following VM parameters:
|
||||
* - use_cpu_for_update
|
||||
* - pte_supports_ats
|
||||
* - pasid (old PASID is released, because compute manages its own PASIDs)
|
||||
*
|
||||
* Reinitializes the page directory to reflect the changed ATS
|
||||
* setting.
|
||||
@@ -3028,8 +3053,7 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
|
||||
* Returns:
|
||||
* 0 for success, -errno for errors.
|
||||
*/
|
||||
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
u32 pasid)
|
||||
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
||||
{
|
||||
bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
|
||||
int r;
|
||||
@@ -3043,19 +3067,6 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
if (r)
|
||||
goto unreserve_bo;
|
||||
|
||||
if (pasid) {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
||||
r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
|
||||
GFP_ATOMIC);
|
||||
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
||||
|
||||
if (r == -ENOSPC)
|
||||
goto unreserve_bo;
|
||||
r = 0;
|
||||
}
|
||||
|
||||
/* Check if PD needs to be reinitialized and do it before
|
||||
* changing any other state, in case it fails.
|
||||
*/
|
||||
@@ -3065,7 +3076,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
to_amdgpu_bo_vm(vm->root.bo),
|
||||
false);
|
||||
if (r)
|
||||
goto free_idr;
|
||||
goto unreserve_bo;
|
||||
}
|
||||
|
||||
/* Update VM state */
|
||||
@@ -3082,7 +3093,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
r = amdgpu_bo_sync_wait(vm->root.bo,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED, true);
|
||||
if (r)
|
||||
goto free_idr;
|
||||
goto unreserve_bo;
|
||||
|
||||
vm->update_funcs = &amdgpu_vm_cpu_funcs;
|
||||
} else {
|
||||
@@ -3092,36 +3103,11 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
vm->last_update = NULL;
|
||||
vm->is_compute_context = true;
|
||||
|
||||
if (vm->pasid) {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
||||
idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
|
||||
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
||||
|
||||
/* Free the original amdgpu allocated pasid
|
||||
* Will be replaced with kfd allocated pasid
|
||||
*/
|
||||
amdgpu_pasid_free(vm->pasid);
|
||||
vm->pasid = 0;
|
||||
}
|
||||
|
||||
/* Free the shadow bo for compute VM */
|
||||
amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
|
||||
|
||||
if (pasid)
|
||||
vm->pasid = pasid;
|
||||
|
||||
goto unreserve_bo;
|
||||
|
||||
free_idr:
|
||||
if (pasid) {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
||||
idr_remove(&adev->vm_manager.pasid_idr, pasid);
|
||||
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
||||
}
|
||||
unreserve_bo:
|
||||
amdgpu_bo_unreserve(vm->root.bo);
|
||||
return r;
|
||||
@@ -3137,14 +3123,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
*/
|
||||
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
||||
{
|
||||
if (vm->pasid) {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
||||
idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
|
||||
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
||||
}
|
||||
vm->pasid = 0;
|
||||
amdgpu_vm_set_pasid(adev, vm, 0);
|
||||
vm->is_compute_context = false;
|
||||
}
|
||||
|
||||
@@ -3168,15 +3147,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
||||
|
||||
root = amdgpu_bo_ref(vm->root.bo);
|
||||
amdgpu_bo_reserve(root, true);
|
||||
if (vm->pasid) {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
||||
idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
|
||||
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
||||
vm->pasid = 0;
|
||||
}
|
||||
|
||||
amdgpu_vm_set_pasid(adev, vm, 0);
|
||||
dma_fence_wait(vm->last_unlocked, false);
|
||||
dma_fence_put(vm->last_unlocked);
|
||||
|
||||
@@ -3258,8 +3229,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
|
||||
adev->vm_manager.vm_update_mode = 0;
|
||||
#endif
|
||||
|
||||
idr_init(&adev->vm_manager.pasid_idr);
|
||||
spin_lock_init(&adev->vm_manager.pasid_lock);
|
||||
xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -3271,8 +3241,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
|
||||
*/
|
||||
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
|
||||
idr_destroy(&adev->vm_manager.pasid_idr);
|
||||
WARN_ON(!xa_empty(&adev->vm_manager.pasids));
|
||||
xa_destroy(&adev->vm_manager.pasids);
|
||||
|
||||
amdgpu_vmid_mgr_fini(adev);
|
||||
}
|
||||
@@ -3341,13 +3311,13 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
|
||||
struct amdgpu_vm *vm;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
||||
xa_lock_irqsave(&adev->vm_manager.pasids, flags);
|
||||
|
||||
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
|
||||
vm = xa_load(&adev->vm_manager.pasids, pasid);
|
||||
if (vm)
|
||||
*task_info = vm->task_info;
|
||||
|
||||
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
||||
xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -3375,12 +3345,13 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
|
||||
* @adev: amdgpu device pointer
|
||||
* @pasid: PASID of the VM
|
||||
* @addr: Address of the fault
|
||||
* @write_fault: true is write fault, false is read fault
|
||||
*
|
||||
* Try to gracefully handle a VM fault. Return true if the fault was handled and
|
||||
* shouldn't be reported any more.
|
||||
*/
|
||||
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
|
||||
uint64_t addr)
|
||||
uint64_t addr, bool write_fault)
|
||||
{
|
||||
bool is_compute_context = false;
|
||||
struct amdgpu_bo *root;
|
||||
@@ -3389,15 +3360,15 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
|
||||
struct amdgpu_vm *vm;
|
||||
int r;
|
||||
|
||||
spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
|
||||
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
|
||||
xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
|
||||
vm = xa_load(&adev->vm_manager.pasids, pasid);
|
||||
if (vm) {
|
||||
root = amdgpu_bo_ref(vm->root.bo);
|
||||
is_compute_context = vm->is_compute_context;
|
||||
} else {
|
||||
root = NULL;
|
||||
}
|
||||
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
|
||||
xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
|
||||
|
||||
if (!root)
|
||||
return false;
|
||||
@@ -3405,7 +3376,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
|
||||
addr /= AMDGPU_GPU_PAGE_SIZE;
|
||||
|
||||
if (is_compute_context &&
|
||||
!svm_range_restore_pages(adev, pasid, addr)) {
|
||||
!svm_range_restore_pages(adev, pasid, addr, write_fault)) {
|
||||
amdgpu_bo_unref(&root);
|
||||
return true;
|
||||
}
|
||||
@@ -3415,11 +3386,11 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
|
||||
goto error_unref;
|
||||
|
||||
/* Double check that the VM still exists */
|
||||
spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
|
||||
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
|
||||
xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
|
||||
vm = xa_load(&adev->vm_manager.pasids, pasid);
|
||||
if (vm && vm->root.bo != root)
|
||||
vm = NULL;
|
||||
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
|
||||
xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
|
||||
if (!vm)
|
||||
goto error_unlock;
|
||||
|
||||
|
||||
@@ -359,8 +359,7 @@ struct amdgpu_vm_manager {
|
||||
/* PASID to VM mapping, will be used in interrupt context to
|
||||
* look up VM of a page fault
|
||||
*/
|
||||
struct idr pasid_idr;
|
||||
spinlock_t pasid_lock;
|
||||
struct xarray pasids;
|
||||
};
|
||||
|
||||
struct amdgpu_bo_va_mapping;
|
||||
@@ -375,9 +374,12 @@ extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
|
||||
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
|
||||
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
|
||||
|
||||
int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
u32 pasid);
|
||||
|
||||
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
|
||||
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid);
|
||||
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid);
|
||||
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
|
||||
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
|
||||
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
|
||||
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
|
||||
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
|
||||
@@ -406,7 +408,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
|
||||
struct dma_fence **fence, bool *free_table);
|
||||
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo_va *bo_va,
|
||||
bool clear);
|
||||
bool clear, bool *table_freed);
|
||||
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
|
||||
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo *bo, bool evicted);
|
||||
@@ -446,7 +448,7 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
|
||||
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
|
||||
struct amdgpu_task_info *task_info);
|
||||
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
|
||||
uint64_t addr);
|
||||
uint64_t addr, bool write_fault);
|
||||
|
||||
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
|
||||
|
||||
|
||||
@@ -32,6 +32,10 @@
|
||||
#include "wafl/wafl2_4_0_0_smn.h"
|
||||
#include "wafl/wafl2_4_0_0_sh_mask.h"
|
||||
|
||||
#define smnPCS_XGMI23_PCS_ERROR_STATUS 0x11a01210
|
||||
#define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c
|
||||
#define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210
|
||||
|
||||
static DEFINE_MUTEX(xgmi_mutex);
|
||||
|
||||
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4
|
||||
@@ -63,6 +67,33 @@ static const int wafl_pcs_err_status_reg_arct[] = {
|
||||
smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
|
||||
};
|
||||
|
||||
static const int xgmi23_pcs_err_status_reg_aldebaran[] = {
|
||||
smnPCS_XGMI23_PCS_ERROR_STATUS,
|
||||
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x100000,
|
||||
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x200000,
|
||||
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x300000,
|
||||
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x400000,
|
||||
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x500000,
|
||||
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x600000,
|
||||
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x700000
|
||||
};
|
||||
|
||||
static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = {
|
||||
smnPCS_XGMI3X16_PCS_ERROR_STATUS,
|
||||
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000,
|
||||
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x200000,
|
||||
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x300000,
|
||||
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x400000,
|
||||
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x500000,
|
||||
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x600000,
|
||||
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x700000
|
||||
};
|
||||
|
||||
static const int walf_pcs_err_status_reg_aldebaran[] = {
|
||||
smnPCS_GOPX1_PCS_ERROR_STATUS,
|
||||
smnPCS_GOPX1_PCS_ERROR_STATUS + 0x100000
|
||||
};
|
||||
|
||||
static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
|
||||
{"XGMI PCS DataLossErr",
|
||||
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
|
||||
@@ -486,6 +517,44 @@ int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
|
||||
struct amdgpu_device *peer_adev)
|
||||
{
|
||||
struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
|
||||
int i;
|
||||
|
||||
for (i = 0 ; i < top->num_nodes; ++i)
|
||||
if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
|
||||
return top->nodes[i].num_links;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Devices that support extended data require the entire hive to initialize with
|
||||
* the shared memory buffer flag set.
|
||||
*
|
||||
* Hive locks and conditions apply - see amdgpu_xgmi_add_device
|
||||
*/
|
||||
static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_info *hive,
|
||||
bool set_extended_data)
|
||||
{
|
||||
struct amdgpu_device *tmp_adev;
|
||||
int ret;
|
||||
|
||||
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
|
||||
ret = psp_xgmi_initialize(&tmp_adev->psp, set_extended_data, false);
|
||||
if (ret) {
|
||||
dev_err(tmp_adev->dev,
|
||||
"XGMI: Failed to initialize xgmi session for data partition %i\n",
|
||||
set_extended_data);
|
||||
return ret;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
|
||||
{
|
||||
struct psp_xgmi_topology_info *top_info;
|
||||
@@ -500,7 +569,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
|
||||
|
||||
if (!adev->gmc.xgmi.pending_reset &&
|
||||
amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
|
||||
ret = psp_xgmi_initialize(&adev->psp);
|
||||
ret = psp_xgmi_initialize(&adev->psp, false, true);
|
||||
if (ret) {
|
||||
dev_err(adev->dev,
|
||||
"XGMI: Failed to initialize xgmi session\n");
|
||||
@@ -563,7 +632,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
|
||||
/* get latest topology info for each device from psp */
|
||||
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
|
||||
ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
|
||||
&tmp_adev->psp.xgmi_context.top_info);
|
||||
&tmp_adev->psp.xgmi_context.top_info, false);
|
||||
if (ret) {
|
||||
dev_err(tmp_adev->dev,
|
||||
"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
|
||||
@@ -573,6 +642,34 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
|
||||
goto exit_unlock;
|
||||
}
|
||||
}
|
||||
|
||||
/* get topology again for hives that support extended data */
|
||||
if (adev->psp.xgmi_context.supports_extended_data) {
|
||||
|
||||
/* initialize the hive to get extended data. */
|
||||
ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, true);
|
||||
if (ret)
|
||||
goto exit_unlock;
|
||||
|
||||
/* get the extended data. */
|
||||
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
|
||||
ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
|
||||
&tmp_adev->psp.xgmi_context.top_info, true);
|
||||
if (ret) {
|
||||
dev_err(tmp_adev->dev,
|
||||
"XGMI: Get topology for extended data failure on device %llx, hive %llx, ret %d",
|
||||
tmp_adev->gmc.xgmi.node_id,
|
||||
tmp_adev->gmc.xgmi.hive_id, ret);
|
||||
goto exit_unlock;
|
||||
}
|
||||
}
|
||||
|
||||
/* initialize the hive to get non-extended data for the next round. */
|
||||
ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, false);
|
||||
if (ret)
|
||||
goto exit_unlock;
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
if (!ret && !adev->gmc.xgmi.pending_reset)
|
||||
@@ -651,7 +748,6 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
|
||||
adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
|
||||
adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
|
||||
adev->gmc.xgmi.ras_if->sub_block_index = 0;
|
||||
strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
|
||||
}
|
||||
ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
|
||||
r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
|
||||
@@ -706,6 +802,17 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
|
||||
pcs_clear_status(adev,
|
||||
xgmi_pcs_err_status_reg_vg20[i]);
|
||||
break;
|
||||
case CHIP_ALDEBARAN:
|
||||
for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
|
||||
pcs_clear_status(adev,
|
||||
xgmi23_pcs_err_status_reg_aldebaran[i]);
|
||||
for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
|
||||
pcs_clear_status(adev,
|
||||
xgmi23_pcs_err_status_reg_aldebaran[i]);
|
||||
for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++)
|
||||
pcs_clear_status(adev,
|
||||
walf_pcs_err_status_reg_aldebaran[i]);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@@ -783,7 +890,6 @@ static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
|
||||
}
|
||||
break;
|
||||
case CHIP_VEGA20:
|
||||
default:
|
||||
/* check xgmi pcs error */
|
||||
for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
|
||||
data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
|
||||
@@ -799,6 +905,32 @@ static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
|
||||
data, &ue_cnt, &ce_cnt, false);
|
||||
}
|
||||
break;
|
||||
case CHIP_ALDEBARAN:
|
||||
/* check xgmi23 pcs error */
|
||||
for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++) {
|
||||
data = RREG32_PCIE(xgmi23_pcs_err_status_reg_aldebaran[i]);
|
||||
if (data)
|
||||
amdgpu_xgmi_query_pcs_error_status(adev,
|
||||
data, &ue_cnt, &ce_cnt, true);
|
||||
}
|
||||
/* check xgmi3x16 pcs error */
|
||||
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) {
|
||||
data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]);
|
||||
if (data)
|
||||
amdgpu_xgmi_query_pcs_error_status(adev,
|
||||
data, &ue_cnt, &ce_cnt, true);
|
||||
}
|
||||
/* check wafl pcs error */
|
||||
for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) {
|
||||
data = RREG32_PCIE(walf_pcs_err_status_reg_aldebaran[i]);
|
||||
if (data)
|
||||
amdgpu_xgmi_query_pcs_error_status(adev,
|
||||
data, &ue_cnt, &ce_cnt, false);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
dev_warn(adev->dev, "XGMI RAS error query not supported");
|
||||
break;
|
||||
}
|
||||
|
||||
adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
|
||||
|
||||
@@ -59,6 +59,8 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
|
||||
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
|
||||
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
|
||||
struct amdgpu_device *peer_adev);
|
||||
int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
|
||||
struct amdgpu_device *peer_adev);
|
||||
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
|
||||
uint64_t addr);
|
||||
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
|
||||
|
||||
@@ -851,7 +851,7 @@ void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
|
||||
pll->reference_div = amdgpu_crtc->pll_reference_div;
|
||||
pll->post_div = amdgpu_crtc->pll_post_div;
|
||||
|
||||
amdgpu_pll_compute(pll, amdgpu_crtc->adjusted_clock, &pll_clock,
|
||||
amdgpu_pll_compute(adev, pll, amdgpu_crtc->adjusted_clock, &pll_clock,
|
||||
&fb_div, &frac_fb_div, &ref_div, &post_div);
|
||||
|
||||
amdgpu_atombios_crtc_program_ss(adev, ATOM_DISABLE, amdgpu_crtc->pll_id,
|
||||
|
||||
@@ -70,7 +70,7 @@
|
||||
|
||||
#include "amdgpu_dm.h"
|
||||
#include "amdgpu_amdkfd.h"
|
||||
#include "dce_virtual.h"
|
||||
#include "amdgpu_vkms.h"
|
||||
|
||||
static const struct amdgpu_video_codec_info cik_video_codecs_encode_array[] =
|
||||
{
|
||||
@@ -2259,7 +2259,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
|
||||
amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
|
||||
if (adev->enable_virtual_display)
|
||||
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
|
||||
#if defined(CONFIG_DRM_AMD_DC)
|
||||
else if (amdgpu_device_has_dc_support(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dm_ip_block);
|
||||
@@ -2277,7 +2277,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
|
||||
amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
|
||||
if (adev->enable_virtual_display)
|
||||
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
|
||||
#if defined(CONFIG_DRM_AMD_DC)
|
||||
else if (amdgpu_device_has_dc_support(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dm_ip_block);
|
||||
@@ -2295,7 +2295,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
|
||||
amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
|
||||
if (adev->enable_virtual_display)
|
||||
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
|
||||
#if defined(CONFIG_DRM_AMD_DC)
|
||||
else if (amdgpu_device_has_dc_support(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dm_ip_block);
|
||||
@@ -2315,7 +2315,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
|
||||
amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
|
||||
if (adev->enable_virtual_display)
|
||||
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
|
||||
#if defined(CONFIG_DRM_AMD_DC)
|
||||
else if (amdgpu_device_has_dc_support(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dm_ip_block);
|
||||
|
||||
51
drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
Normal file
51
drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
Normal file
@@ -0,0 +1,51 @@
|
||||
/*
|
||||
* Copyright 2018 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
#include "amdgpu.h"
|
||||
#include "nv.h"
|
||||
|
||||
#include "soc15_common.h"
|
||||
#include "soc15_hw_ip.h"
|
||||
#include "cyan_skillfish_ip_offset.h"
|
||||
|
||||
int cyan_skillfish_reg_base_init(struct amdgpu_device *adev)
|
||||
{
|
||||
/* HW has more IP blocks, only initialized the blocke needed by driver */
|
||||
uint32_t i;
|
||||
for (i = 0 ; i < MAX_INSTANCE ; ++i) {
|
||||
adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
|
||||
adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
|
||||
adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
|
||||
adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
|
||||
adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
|
||||
adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
|
||||
adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
|
||||
adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
|
||||
adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
|
||||
adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
|
||||
adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
|
||||
adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
|
||||
adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
|
||||
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@@ -1,780 +0,0 @@
|
||||
/*
|
||||
* Copyright 2014 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <drm/drm_vblank.h>
|
||||
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_pm.h"
|
||||
#include "amdgpu_i2c.h"
|
||||
#include "atom.h"
|
||||
#include "amdgpu_pll.h"
|
||||
#include "amdgpu_connectors.h"
|
||||
#ifdef CONFIG_DRM_AMDGPU_SI
|
||||
#include "dce_v6_0.h"
|
||||
#endif
|
||||
#ifdef CONFIG_DRM_AMDGPU_CIK
|
||||
#include "dce_v8_0.h"
|
||||
#endif
|
||||
#include "dce_v10_0.h"
|
||||
#include "dce_v11_0.h"
|
||||
#include "dce_virtual.h"
|
||||
#include "ivsrcid/ivsrcid_vislands30.h"
|
||||
#include "amdgpu_display.h"
|
||||
|
||||
#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
|
||||
|
||||
|
||||
static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
|
||||
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
|
||||
static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
|
||||
int index);
|
||||
static int dce_virtual_pageflip(struct amdgpu_device *adev,
|
||||
unsigned crtc_id);
|
||||
static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer);
|
||||
static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
|
||||
int crtc,
|
||||
enum amdgpu_interrupt_state state);
|
||||
|
||||
static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* No-op: there is no scanout hardware to point at a new base address. */
static void dce_virtual_page_flip(struct amdgpu_device *adev,
				  int crtc_id, u64 crtc_base, bool async)
{
}
|
||||
|
||||
/*
 * No real scanout position exists for a virtual CRTC; zero the outputs
 * and return -EINVAL so callers fall back to timestamp-based estimates.
 */
static int dce_virtual_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					   u32 *vbl, u32 *position)
{
	*vbl = 0;
	*position = 0;
	return -EINVAL;
}
|
||||
|
||||
static bool dce_virtual_hpd_sense(struct amdgpu_device *adev,
|
||||
enum amdgpu_hpd_id hpd)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static void dce_virtual_hpd_set_polarity(struct amdgpu_device *adev,
|
||||
enum amdgpu_hpd_id hpd)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
/* No HPD GPIO register exists on the virtual display. */
static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return 0;
}
|
||||
|
||||
/**
 * dce_virtual_bandwidth_update - display watermark hook (no-op)
 *
 * @adev: amdgpu_device pointer
 *
 * Required callback in amdgpu_display_funcs; the virtual display has
 * no watermarks or line buffers to program, so nothing is done here.
 */
static void dce_virtual_bandwidth_update(struct amdgpu_device *adev)
{
}
|
||||
|
||||
/* Accept any gamma LUT; there is no hardware LUT to load. */
static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
				      u16 *green, u16 *blue, uint32_t size,
				      struct drm_modeset_acquire_ctx *ctx)
{
	return 0;
}
|
||||
|
||||
/*
 * Tear down a virtual CRTC: unregister it from the DRM core and free the
 * containing amdgpu_crtc (allocated in dce_virtual_crtc_init()).
 */
static void dce_virtual_crtc_destroy(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(amdgpu_crtc);
}
|
||||
|
||||
/*
 * CRTC callbacks for the virtual display.  Cursor hooks are absent (no
 * hardware cursor); vblank counting/enabling goes through the common
 * amdgpu KMS helpers, and timestamps come from the generic DRM helper
 * since the vblank is generated by an hrtimer rather than hardware.
 */
static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
	.cursor_set2 = NULL,
	.cursor_move = NULL,
	.gamma_set = dce_virtual_crtc_gamma_set,
	.set_config = amdgpu_display_crtc_set_config,
	.destroy = dce_virtual_crtc_destroy,
	.page_flip_target = amdgpu_display_crtc_page_flip_target,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = amdgpu_enable_vblank_kms,
	.disable_vblank = amdgpu_disable_vblank_kms,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
|
||||
|
||||
/*
 * DPMS handling for the virtual CRTC.  On power-on, mark the CRTC enabled,
 * re-sync the crtc_irq enable state (so the vblank timer interrupt source
 * reflects any vblank references taken while "off"), then turn DRM vblank
 * handling back on.  On any power-down state, vblanks are disabled before
 * the enabled flag is cleared.
 */
static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	unsigned type;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		/* Make sure VBLANK interrupts are still enabled */
		type = amdgpu_display_crtc_idx_to_irq_type(adev,
						amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		drm_crtc_vblank_on(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_crtc_vblank_off(crtc);
		amdgpu_crtc->enabled = false;
		break;
	}
}
|
||||
|
||||
|
||||
static void dce_virtual_crtc_prepare(struct drm_crtc *crtc)
|
||||
{
|
||||
dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
|
||||
}
|
||||
|
||||
static void dce_virtual_crtc_commit(struct drm_crtc *crtc)
|
||||
{
|
||||
dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
|
||||
}
|
||||
|
||||
/*
 * Fully disable a virtual CRTC: shut off vblank delivery (only if vblank
 * support was initialized, i.e. dev->num_crtcs is non-zero) and reset the
 * per-CRTC bookkeeping so a later enable starts from a clean slate.
 */
static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;

	if (dev->num_crtcs)
		drm_crtc_vblank_off(crtc);

	amdgpu_crtc->enabled = false;
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
}
|
||||
|
||||
static int dce_virtual_crtc_mode_set(struct drm_crtc *crtc,
|
||||
struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode,
|
||||
int x, int y, struct drm_framebuffer *old_fb)
|
||||
{
|
||||
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
|
||||
|
||||
/* update the hw version fpr dpm */
|
||||
amdgpu_crtc->hw_mode = *adjusted_mode;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
|
||||
const struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
/* No scanout base to reprogram; always succeed. */
static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				     struct drm_framebuffer *old_fb)
{
	return 0;
}
|
||||
|
||||
static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc,
|
||||
struct drm_framebuffer *fb,
|
||||
int x, int y, enum mode_set_atomic state)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Legacy CRTC helper callbacks; scanout position comes from the common
 * amdgpu helper since the virtual CRTC has no real beam position. */
static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
	.dpms = dce_virtual_crtc_dpms,
	.mode_fixup = dce_virtual_crtc_mode_fixup,
	.mode_set = dce_virtual_crtc_mode_set,
	.mode_set_base = dce_virtual_crtc_set_base,
	.mode_set_base_atomic = dce_virtual_crtc_set_base_atomic,
	.prepare = dce_virtual_crtc_prepare,
	.commit = dce_virtual_crtc_commit,
	.disable = dce_virtual_crtc_disable,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
|
||||
|
||||
/*
 * Allocate and register virtual CRTC @index: create the amdgpu_crtc (with
 * room for per-CRTC connector pointers), hook it into DRM, and start the
 * hrtimer that emulates the vblank interrupt at ~60 Hz
 * (DCE_VIRTUAL_VBLANK_PERIOD ns).
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
{
	struct amdgpu_crtc *amdgpu_crtc;

	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (amdgpu_crtc == NULL)
		return -ENOMEM;

	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_virtual_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
	amdgpu_crtc->crtc_id = index;
	adev->mode_info.crtcs[index] = amdgpu_crtc;

	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
	amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);

	/* Timer is fully configured (expiry + callback) before it is armed. */
	hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_set_expires(&amdgpu_crtc->vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD);
	amdgpu_crtc->vblank_timer.function = dce_virtual_vblank_timer_handle;
	hrtimer_start(&amdgpu_crtc->vblank_timer,
		      DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);
	return 0;
}
|
||||
|
||||
static int dce_virtual_early_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
dce_virtual_set_display_funcs(adev);
|
||||
dce_virtual_set_irq_funcs(adev);
|
||||
|
||||
adev->mode_info.num_hpd = 1;
|
||||
adev->mode_info.num_dig = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct drm_encoder *
|
||||
dce_virtual_encoder(struct drm_connector *connector)
|
||||
{
|
||||
struct drm_encoder *encoder;
|
||||
|
||||
drm_connector_for_each_possible_encoder(connector, encoder) {
|
||||
if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
|
||||
return encoder;
|
||||
}
|
||||
|
||||
/* pick the first one */
|
||||
drm_connector_for_each_possible_encoder(connector, encoder)
|
||||
return encoder;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int dce_virtual_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct drm_display_mode *mode = NULL;
|
||||
unsigned i;
|
||||
static const struct mode_size {
|
||||
int w;
|
||||
int h;
|
||||
} common_modes[] = {
|
||||
{ 640, 480},
|
||||
{ 720, 480},
|
||||
{ 800, 600},
|
||||
{ 848, 480},
|
||||
{1024, 768},
|
||||
{1152, 768},
|
||||
{1280, 720},
|
||||
{1280, 800},
|
||||
{1280, 854},
|
||||
{1280, 960},
|
||||
{1280, 1024},
|
||||
{1440, 900},
|
||||
{1400, 1050},
|
||||
{1680, 1050},
|
||||
{1600, 1200},
|
||||
{1920, 1080},
|
||||
{1920, 1200},
|
||||
{2560, 1440},
|
||||
{4096, 3112},
|
||||
{3656, 2664},
|
||||
{3840, 2160},
|
||||
{4096, 2160},
|
||||
};
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
|
||||
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
|
||||
drm_mode_probed_add(connector, mode);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static enum drm_mode_status dce_virtual_mode_valid(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
return MODE_OK;
|
||||
}
|
||||
|
||||
/* Connector DPMS hook: nothing to power up or down. */
static int
dce_virtual_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}
|
||||
|
||||
static int
|
||||
dce_virtual_set_property(struct drm_connector *connector,
|
||||
struct drm_property *property,
|
||||
uint64_t val)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Tear down the virtual connector (allocated in
 * dce_virtual_connector_encoder_init()) and free it. */
static void dce_virtual_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
|
||||
|
||||
/* Nothing to force: the virtual connector is always connected. */
static void dce_virtual_force(struct drm_connector *connector)
{
}
|
||||
|
||||
/* Probe helpers for the virtual connector. */
static const struct drm_connector_helper_funcs dce_virtual_connector_helper_funcs = {
	.get_modes = dce_virtual_get_modes,
	.mode_valid = dce_virtual_mode_valid,
	.best_encoder = dce_virtual_encoder,
};
|
||||
|
||||
/* Core connector callbacks; mode probing uses the generic single-connector
 * helper on top of the probe helpers above. */
static const struct drm_connector_funcs dce_virtual_connector_funcs = {
	.dpms = dce_virtual_dpms,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = dce_virtual_set_property,
	.destroy = dce_virtual_destroy,
	.force = dce_virtual_force,
};
|
||||
|
||||
static int dce_virtual_sw_init(void *handle)
|
||||
{
|
||||
int r, i;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
adev_to_drm(adev)->max_vblank_count = 0;
|
||||
|
||||
adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
|
||||
|
||||
adev_to_drm(adev)->mode_config.max_width = 16384;
|
||||
adev_to_drm(adev)->mode_config.max_height = 16384;
|
||||
|
||||
adev_to_drm(adev)->mode_config.preferred_depth = 24;
|
||||
adev_to_drm(adev)->mode_config.prefer_shadow = 1;
|
||||
|
||||
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
|
||||
|
||||
r = amdgpu_display_modeset_create_props(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
adev_to_drm(adev)->mode_config.max_width = 16384;
|
||||
adev_to_drm(adev)->mode_config.max_height = 16384;
|
||||
|
||||
/* allocate crtcs, encoders, connectors */
|
||||
for (i = 0; i < adev->mode_info.num_crtc; i++) {
|
||||
r = dce_virtual_crtc_init(adev, i);
|
||||
if (r)
|
||||
return r;
|
||||
r = dce_virtual_connector_encoder_init(adev, i);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
drm_kms_helper_poll_init(adev_to_drm(adev));
|
||||
|
||||
adev->mode_info.mode_config_initialized = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * IP sw_fini: stop every per-CRTC vblank hrtimer first (so no callback can
 * run into freed state), then release the EDID, polling and mode_config
 * resources.  Finally the crtcs[] pointers are cleared so the dce irq
 * finish path cannot touch freed memory.
 */
static int dce_virtual_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i = 0;

	for (i = 0; i < adev->mode_info.num_crtc; i++)
		if (adev->mode_info.crtcs[i])
			hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);

	kfree(adev->mode_info.bios_hardcoded_edid);

	drm_kms_helper_poll_fini(adev_to_drm(adev));

	drm_mode_config_cleanup(adev_to_drm(adev));
	/* clear crtcs pointer to avoid dce irq finish routine access freed data */
	memset(adev->mode_info.crtcs, 0, sizeof(adev->mode_info.crtcs[0]) * AMDGPU_MAX_CRTCS);
	adev->mode_info.mode_config_initialized = false;
	return 0;
}
|
||||
|
||||
/*
 * IP hw_init: the virtual display has no hardware of its own, but on ASICs
 * that do carry a real DCE block the physical engine must be disabled so
 * it does not interfere.  Dispatch to the per-generation disable helper;
 * ASICs with no DCE (Topaz, Hainan) and unknown chips fall through.
 */
static int dce_virtual_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		dce_v6_0_disable_dce(adev);
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		dce_v8_0_disable_dce(adev);
		break;
#endif
	case CHIP_FIJI:
	case CHIP_TONGA:
		dce_v10_0_disable_dce(adev);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_VEGAM:
		dce_v11_0_disable_dce(adev);
		break;
	case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
		/* no DCE */
		break;
	default:
		break;
	}
	return 0;
}
|
||||
|
||||
/* IP hw_fini: nothing was enabled in hw_init that needs undoing. */
static int dce_virtual_hw_fini(void *handle)
{
	return 0;
}
|
||||
|
||||
/* IP suspend: save display state via the common helper, then run hw_fini. */
static int dce_virtual_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_display_suspend_helper(adev);
	if (r)
		return r;

	return dce_virtual_hw_fini(handle);
}
|
||||
|
||||
/* IP resume: re-run hw_init, then restore display state (reverse of suspend). */
static int dce_virtual_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dce_virtual_hw_init(handle);
	if (r)
		return r;

	return amdgpu_display_resume_helper(adev);
}
|
||||
|
||||
static bool dce_virtual_is_idle(void *handle)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Nothing to wait on; immediately idle. */
static int dce_virtual_wait_for_idle(void *handle)
{
	return 0;
}
|
||||
|
||||
/* No hardware to reset. */
static int dce_virtual_soft_reset(void *handle)
{
	return 0;
}
|
||||
|
||||
static int dce_virtual_set_clockgating_state(void *handle,
|
||||
enum amd_clockgating_state state)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dce_virtual_set_powergating_state(void *handle,
|
||||
enum amd_powergating_state state)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* amdgpu IP-block lifecycle callbacks for the virtual DCE block. */
static const struct amd_ip_funcs dce_virtual_ip_funcs = {
	.name = "dce_virtual",
	.early_init = dce_virtual_early_init,
	.late_init = NULL,
	.sw_init = dce_virtual_sw_init,
	.sw_fini = dce_virtual_sw_fini,
	.hw_init = dce_virtual_hw_init,
	.hw_fini = dce_virtual_hw_fini,
	.suspend = dce_virtual_suspend,
	.resume = dce_virtual_resume,
	.is_idle = dce_virtual_is_idle,
	.wait_for_idle = dce_virtual_wait_for_idle,
	.soft_reset = dce_virtual_soft_reset,
	.set_clockgating_state = dce_virtual_set_clockgating_state,
	.set_powergating_state = dce_virtual_set_powergating_state,
};
|
||||
|
||||
/* these are handled by the primary encoders */
static void dce_virtual_encoder_prepare(struct drm_encoder *encoder)
{
}
|
||||
|
||||
/* No-op: handled by the primary encoders. */
static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
{
}
|
||||
|
||||
/* No-op: the virtual encoder has no hardware to program for a mode. */
static void
dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode)
{
}
|
||||
|
||||
/* No-op: nothing to power down. */
static void dce_virtual_encoder_disable(struct drm_encoder *encoder)
{
}
|
||||
|
||||
/* No-op DPMS for the virtual encoder. */
static void
dce_virtual_encoder_dpms(struct drm_encoder *encoder, int mode)
{
}
|
||||
|
||||
static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
|
||||
const struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Encoder helper callbacks: all no-ops, present to satisfy the helper API. */
static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = {
	.dpms = dce_virtual_encoder_dpms,
	.mode_fixup = dce_virtual_encoder_mode_fixup,
	.prepare = dce_virtual_encoder_prepare,
	.mode_set = dce_virtual_encoder_mode_set,
	.commit = dce_virtual_encoder_commit,
	.disable = dce_virtual_encoder_disable,
};
|
||||
|
||||
/* Free the encoder allocated in dce_virtual_connector_encoder_init(). */
static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
|
||||
|
||||
/* Core encoder callbacks: only destruction is needed. */
static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
	.destroy = dce_virtual_encoder_destroy,
};
|
||||
|
||||
/*
 * Create the encoder + connector pair for virtual CRTC @index and link
 * them together.  The encoder is restricted to exactly that CRTC via
 * possible_crtcs.  On connector allocation failure the already-created
 * encoder is freed before returning.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
					      int index)
{
	struct drm_encoder *encoder;
	struct drm_connector *connector;

	/* add a new encoder */
	encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
	if (!encoder)
		return -ENOMEM;
	encoder->possible_crtcs = 1 << index;
	drm_encoder_init(adev_to_drm(adev), encoder, &dce_virtual_encoder_funcs,
			 DRM_MODE_ENCODER_VIRTUAL, NULL);
	drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);

	connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
	if (!connector) {
		kfree(encoder);
		return -ENOMEM;
	}

	/* add a new connector */
	drm_connector_init(adev_to_drm(adev), connector, &dce_virtual_connector_funcs,
			   DRM_MODE_CONNECTOR_VIRTUAL);
	drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
	connector->interlace_allowed = false;
	connector->doublescan_allowed = false;

	/* link them */
	drm_connector_attach_encoder(connector, encoder);

	return 0;
}
|
||||
|
||||
/* amdgpu display callbacks; backlight and add_* hooks are unsupported. */
static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
	.bandwidth_update = &dce_virtual_bandwidth_update,
	.vblank_get_counter = &dce_virtual_vblank_get_counter,
	.backlight_set_level = NULL,
	.backlight_get_level = NULL,
	.hpd_sense = &dce_virtual_hpd_sense,
	.hpd_set_polarity = &dce_virtual_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
	.page_flip = &dce_virtual_page_flip,
	.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
	.add_encoder = NULL,
	.add_connector = NULL,
};
|
||||
|
||||
static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
adev->mode_info.funcs = &dce_virtual_display_funcs;
|
||||
}
|
||||
|
||||
static int dce_virtual_pageflip(struct amdgpu_device *adev,
|
||||
unsigned crtc_id)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct amdgpu_crtc *amdgpu_crtc;
|
||||
struct amdgpu_flip_work *works;
|
||||
|
||||
amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
|
||||
|
||||
if (crtc_id >= adev->mode_info.num_crtc) {
|
||||
DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* IRQ could occur when in initial stage */
|
||||
if (amdgpu_crtc == NULL)
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
|
||||
works = amdgpu_crtc->pflip_works;
|
||||
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
|
||||
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
|
||||
"AMDGPU_FLIP_SUBMITTED(%d)\n",
|
||||
amdgpu_crtc->pflip_status,
|
||||
AMDGPU_FLIP_SUBMITTED);
|
||||
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* page flip completed. clean up */
|
||||
amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
|
||||
amdgpu_crtc->pflip_works = NULL;
|
||||
|
||||
/* wakeup usersapce */
|
||||
if (works->event)
|
||||
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
|
||||
|
||||
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
|
||||
|
||||
drm_crtc_vblank_put(&amdgpu_crtc->base);
|
||||
amdgpu_bo_unref(&works->old_abo);
|
||||
kfree(works->shared);
|
||||
kfree(works);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Per-CRTC hrtimer callback that emulates the vblank interrupt: if the
 * timer-backed crtc irq source is enabled for this CRTC, deliver a DRM
 * vblank and complete any pending page flip.
 *
 * NOTE(review): the timer is re-armed manually with hrtimer_start() and
 * HRTIMER_NORESTART is returned, rather than using hrtimer_forward() with
 * HRTIMER_RESTART — this means the period is measured from callback time,
 * so the effective rate drifts slightly below 60 Hz; presumably acceptable
 * for a virtual display, but worth confirming.
 */
static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
{
	struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
						       struct amdgpu_crtc, vblank_timer);
	struct drm_device *ddev = amdgpu_crtc->base.dev;
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_irq_src *source = adev->irq.client[AMDGPU_IRQ_CLIENTID_LEGACY].sources
		[VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER];
	int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
						amdgpu_crtc->crtc_id);

	if (amdgpu_irq_enabled(adev, source, irq_type)) {
		drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
		dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
	}
	hrtimer_start(vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD,
		      HRTIMER_MODE_REL);

	return HRTIMER_NORESTART;
}
|
||||
|
||||
/*
 * Record the requested vblank "interrupt" enable state for @crtc.  The
 * state is only stored in vsync_timer_enabled; the vblank hrtimer itself
 * keeps running and consults this flag via amdgpu_irq_enabled() in the
 * timer handler.
 */
static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
							int crtc,
							enum amdgpu_interrupt_state state)
{
	if (crtc >= adev->mode_info.num_crtc || !adev->mode_info.crtcs[crtc]) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state;
	DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
}
|
||||
|
||||
|
||||
/*
 * amdgpu_irq_src .set callback for the virtual crtc irq: map the irq type
 * (one per CRTC, up to VBLANK6) to the per-CRTC vblank enable flag.
 *
 * Returns 0 on success, -EINVAL for an out-of-range type.
 */
static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
					  struct amdgpu_irq_src *source,
					  unsigned type,
					  enum amdgpu_interrupt_state state)
{
	if (type > AMDGPU_CRTC_IRQ_VBLANK6)
		return -EINVAL;

	dce_virtual_set_crtc_vblank_interrupt_state(adev, type, state);

	return 0;
}
|
||||
|
||||
/* Virtual crtc irq source: only .set is needed; no hw interrupt to process. */
static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
	.set = dce_virtual_set_crtc_irq_state,
	.process = NULL,
};
|
||||
|
||||
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
adev->crtc_irq.num_types = adev->mode_info.num_crtc;
|
||||
adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
|
||||
}
|
||||
|
||||
/* Exported IP block descriptor for the virtual DCE (v1.0.0). */
const struct amdgpu_ip_block_version dce_virtual_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_virtual_ip_funcs,
};
|
||||
@@ -56,6 +56,10 @@
|
||||
#define GFX10_NUM_GFX_RINGS_Sienna_Cichlid 1
|
||||
#define GFX10_MEC_HPD_SIZE 2048
|
||||
|
||||
#define RLCG_VFGATE_DISABLED 0x4000000
|
||||
#define RLCG_WRONG_OPERATION_TYPE 0x2000000
|
||||
#define RLCG_NOT_IN_RANGE 0x1000000
|
||||
|
||||
#define F32_CE_PROGRAM_RAM_SIZE 65536
|
||||
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
|
||||
|
||||
@@ -181,6 +185,9 @@
|
||||
#define GFX_RLCG_GC_READ (0x1 << 28)
|
||||
#define GFX_RLCG_MMHUB_WRITE (0x2 << 28)
|
||||
|
||||
#define RLCG_ERROR_REPORT_ENABLED(adev) \
|
||||
(amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
|
||||
|
||||
MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
|
||||
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
|
||||
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
|
||||
@@ -249,6 +256,39 @@ MODULE_FIRMWARE("amdgpu/yellow_carp_mec.bin");
|
||||
MODULE_FIRMWARE("amdgpu/yellow_carp_mec2.bin");
|
||||
MODULE_FIRMWARE("amdgpu/yellow_carp_rlc.bin");
|
||||
|
||||
MODULE_FIRMWARE("amdgpu/cyan_skillfish_ce.bin");
|
||||
MODULE_FIRMWARE("amdgpu/cyan_skillfish_pfp.bin");
|
||||
MODULE_FIRMWARE("amdgpu/cyan_skillfish_me.bin");
|
||||
MODULE_FIRMWARE("amdgpu/cyan_skillfish_mec.bin");
|
||||
MODULE_FIRMWARE("amdgpu/cyan_skillfish_mec2.bin");
|
||||
MODULE_FIRMWARE("amdgpu/cyan_skillfish_rlc.bin");
|
||||
|
||||
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_ce.bin");
|
||||
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_pfp.bin");
|
||||
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_me.bin");
|
||||
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_mec.bin");
|
||||
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_mec2.bin");
|
||||
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_rlc.bin");
|
||||
|
||||
static const struct soc15_reg_golden golden_settings_gc_10_0[] =
|
||||
{
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
|
||||
/* TA_GRAD_ADJ_UCONFIG -> TA_GRAD_ADJ */
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382),
|
||||
/* VGT_TF_RING_SIZE_UMD -> VGT_TF_RING_SIZE */
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2262c24e),
|
||||
/* VGT_HS_OFFCHIP_PARAM_UMD -> VGT_HS_OFFCHIP_PARAM */
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x226cc24f),
|
||||
/* VGT_TF_MEMORY_BASE_UMD -> VGT_TF_MEMORY_BASE */
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x226ec250),
|
||||
/* VGT_TF_MEMORY_BASE_HI_UMD -> VGT_TF_MEMORY_BASE_HI */
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2278c261),
|
||||
/* VGT_ESGS_RING_SIZE_UMD -> VGT_ESGS_RING_SIZE */
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2232c240),
|
||||
/* VGT_GSVS_RING_SIZE_UMD -> VGT_GSVS_RING_SIZE */
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2233c241),
|
||||
};
|
||||
|
||||
static const struct soc15_reg_golden golden_settings_gc_10_1[] =
|
||||
{
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
|
||||
@@ -1486,6 +1526,7 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
|
||||
uint32_t i = 0;
|
||||
uint32_t retries = 50000;
|
||||
u32 ret = 0;
|
||||
u32 tmp;
|
||||
|
||||
scratch_reg0 = adev->rmmio +
|
||||
(adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0) * 4;
|
||||
@@ -1519,9 +1560,8 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
|
||||
writel(v, scratch_reg0);
|
||||
writel(offset | flag, scratch_reg1);
|
||||
writel(1, spare_int);
|
||||
for (i = 0; i < retries; i++) {
|
||||
u32 tmp;
|
||||
|
||||
for (i = 0; i < retries; i++) {
|
||||
tmp = readl(scratch_reg1);
|
||||
if (!(tmp & flag))
|
||||
break;
|
||||
@@ -1529,8 +1569,19 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
|
||||
udelay(10);
|
||||
}
|
||||
|
||||
if (i >= retries)
|
||||
pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
|
||||
if (i >= retries) {
|
||||
if (RLCG_ERROR_REPORT_ENABLED(adev)) {
|
||||
if (tmp & RLCG_VFGATE_DISABLED)
|
||||
pr_err("The vfgate is disabled, program reg:0x%05x failed!\n", offset);
|
||||
else if (tmp & RLCG_WRONG_OPERATION_TYPE)
|
||||
pr_err("Wrong operation type, program reg:0x%05x failed!\n", offset);
|
||||
else if (tmp & RLCG_NOT_IN_RANGE)
|
||||
pr_err("The register is not in range, program reg:0x%05x failed!\n", offset);
|
||||
else
|
||||
pr_err("Unknown error type, program reg:0x%05x failed!\n", offset);
|
||||
} else
|
||||
pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
|
||||
}
|
||||
}
|
||||
|
||||
ret = readl(scratch_reg0);
|
||||
@@ -1538,7 +1589,7 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 acc_flags, u32 hwip)
|
||||
static void gfx_v10_sriov_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 acc_flags, u32 hwip)
|
||||
{
|
||||
u32 rlcg_flag;
|
||||
|
||||
@@ -1554,7 +1605,7 @@ static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value,
|
||||
WREG32(offset, value);
|
||||
}
|
||||
|
||||
static u32 gfx_v10_rlcg_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
|
||||
static u32 gfx_v10_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
|
||||
{
|
||||
u32 rlcg_flag;
|
||||
|
||||
@@ -3488,12 +3539,51 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_5[] = {
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000)
|
||||
};
|
||||
|
||||
static const struct soc15_reg_golden golden_settings_gc_10_0_cyan_skillfish[] = {
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_FAST_CLKS, 0x3fffffff, 0x0000493e),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x3c000100),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0xa0000000, 0xa0000000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x00008000, 0x003c8014),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_DRAM_BURST_CTRL, 0x00000010, 0x00000017),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xd8d8d8d8),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000003, 0x00000003),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860210),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000044),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x00009d00, 0x00008500),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCMC_VM_CACHEABLE_DRAM_ADDRESS_END, 0xffffffff, 0x000fffff),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_DRAM_BURST_CTRL, 0x00000010, 0x00000017),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xfcfcfcfc, 0xd8d8d8d8),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77707770, 0x21302130),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77707770, 0x21302130),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xfc02002f, 0x9402002f),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0x00002188, 0x00000188),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x08000009, 0x08000009),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_0, 0xcc3fcc03, 0x842a4c02),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000000f, 0x00000000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffff3109, 0xffff3101),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x00030008, 0x01030000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000)
|
||||
};
|
||||
|
||||
#define DEFAULT_SH_MEM_CONFIG \
|
||||
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
|
||||
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
|
||||
(SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
|
||||
(3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
|
||||
|
||||
/* TODO: pending on golden setting value of gb address config */
|
||||
#define CYAN_SKILLFISH_GB_ADDR_CONFIG_GOLDEN 0x00100044
|
||||
|
||||
static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev);
|
||||
static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev);
|
||||
@@ -3718,6 +3808,14 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
|
||||
golden_settings_gc_10_3_5,
|
||||
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_5));
|
||||
break;
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
soc15_program_register_sequence(adev,
|
||||
golden_settings_gc_10_0,
|
||||
(const u32)ARRAY_SIZE(golden_settings_gc_10_0));
|
||||
soc15_program_register_sequence(adev,
|
||||
golden_settings_gc_10_0_cyan_skillfish,
|
||||
(const u32)ARRAY_SIZE(golden_settings_gc_10_0_cyan_skillfish));
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@@ -3891,6 +3989,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
|
||||
case CHIP_NAVI10:
|
||||
case CHIP_NAVI12:
|
||||
case CHIP_NAVI14:
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
if ((adev->gfx.me_fw_version >= 0x00000046) &&
|
||||
(adev->gfx.me_feature_version >= 27) &&
|
||||
(adev->gfx.pfp_fw_version >= 0x00000068) &&
|
||||
@@ -4025,6 +4124,12 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
|
||||
case CHIP_YELLOW_CARP:
|
||||
chip_name = "yellow_carp";
|
||||
break;
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
|
||||
chip_name = "cyan_skillfish2";
|
||||
else
|
||||
chip_name = "cyan_skillfish";
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
@@ -4516,6 +4621,7 @@ static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd,
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
|
||||
}
|
||||
|
||||
static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
|
||||
@@ -4604,6 +4710,14 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
|
||||
adev->gfx.config.gb_addr_config_fields.num_pkrs =
|
||||
1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
|
||||
break;
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
adev->gfx.config.max_hw_contexts = 8;
|
||||
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
|
||||
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
|
||||
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
|
||||
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
|
||||
gb_addr_config = CYAN_SKILLFISH_GB_ADDR_CONFIG_GOLDEN;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
break;
|
||||
@@ -4708,6 +4822,7 @@ static int gfx_v10_0_sw_init(void *handle)
|
||||
case CHIP_NAVI10:
|
||||
case CHIP_NAVI14:
|
||||
case CHIP_NAVI12:
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
adev->gfx.me.num_me = 1;
|
||||
adev->gfx.me.num_pipe_per_me = 1;
|
||||
adev->gfx.me.num_queue_per_pipe = 1;
|
||||
@@ -5319,7 +5434,8 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
|
||||
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
|
||||
adev->psp.autoload_supported) {
|
||||
|
||||
r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
|
||||
if (r)
|
||||
@@ -5379,7 +5495,7 @@ static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
|
||||
int ret;
|
||||
RLC_TABLE_OF_CONTENT *rlc_toc;
|
||||
|
||||
ret = amdgpu_bo_create_reserved(adev, adev->psp.toc_bin_size, PAGE_SIZE,
|
||||
ret = amdgpu_bo_create_reserved(adev, adev->psp.toc.size_bytes, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_GTT,
|
||||
&adev->gfx.rlc.rlc_toc_bo,
|
||||
&adev->gfx.rlc.rlc_toc_gpu_addr,
|
||||
@@ -5390,7 +5506,7 @@ static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
|
||||
}
|
||||
|
||||
/* Copy toc from psp sos fw to rlc toc buffer */
|
||||
memcpy(adev->gfx.rlc.rlc_toc_buf, adev->psp.toc_start_addr, adev->psp.toc_bin_size);
|
||||
memcpy(adev->gfx.rlc.rlc_toc_buf, adev->psp.toc.start_addr, adev->psp.toc.size_bytes);
|
||||
|
||||
rlc_toc = (RLC_TABLE_OF_CONTENT *)adev->gfx.rlc.rlc_toc_buf;
|
||||
while (rlc_toc && (rlc_toc->id > FIRMWARE_ID_INVALID) &&
|
||||
@@ -7608,10 +7724,8 @@ static int gfx_v10_0_soft_reset(void *handle)
|
||||
|
||||
static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
|
||||
{
|
||||
uint64_t clock;
|
||||
uint64_t clock, clock_lo, clock_hi, hi_check;
|
||||
|
||||
amdgpu_gfx_off_ctrl(adev, false);
|
||||
mutex_lock(&adev->gfx.gpu_clock_mutex);
|
||||
switch (adev->asic_type) {
|
||||
case CHIP_VANGOGH:
|
||||
case CHIP_YELLOW_CARP:
|
||||
@@ -7619,12 +7733,21 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
|
||||
((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
|
||||
break;
|
||||
default:
|
||||
clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
|
||||
((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
|
||||
preempt_disable();
|
||||
clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER);
|
||||
clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER);
|
||||
hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER);
|
||||
/* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
|
||||
* roughly every 42 seconds.
|
||||
*/
|
||||
if (hi_check != clock_hi) {
|
||||
clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER);
|
||||
clock_hi = hi_check;
|
||||
}
|
||||
preempt_enable();
|
||||
clock = clock_lo | (clock_hi << 32ULL);
|
||||
break;
|
||||
}
|
||||
mutex_unlock(&adev->gfx.gpu_clock_mutex);
|
||||
amdgpu_gfx_off_ctrl(adev, true);
|
||||
return clock;
|
||||
}
|
||||
|
||||
@@ -7665,6 +7788,7 @@ static int gfx_v10_0_early_init(void *handle)
|
||||
case CHIP_NAVI10:
|
||||
case CHIP_NAVI14:
|
||||
case CHIP_NAVI12:
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
|
||||
break;
|
||||
case CHIP_SIENNA_CICHLID:
|
||||
@@ -8261,8 +8385,8 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
|
||||
.reset = gfx_v10_0_rlc_reset,
|
||||
.start = gfx_v10_0_rlc_start,
|
||||
.update_spm_vmid = gfx_v10_0_update_spm_vmid,
|
||||
.rlcg_wreg = gfx_v10_rlcg_wreg,
|
||||
.rlcg_rreg = gfx_v10_rlcg_rreg,
|
||||
.sriov_wreg = gfx_v10_sriov_wreg,
|
||||
.sriov_rreg = gfx_v10_sriov_rreg,
|
||||
.is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
|
||||
};
|
||||
|
||||
@@ -9425,6 +9549,7 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
|
||||
case CHIP_DIMGREY_CAVEFISH:
|
||||
case CHIP_BEIGE_GOBY:
|
||||
case CHIP_YELLOW_CARP:
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
|
||||
break;
|
||||
case CHIP_NAVI12:
|
||||
|
||||
@@ -3027,6 +3027,7 @@ static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
|
||||
}
|
||||
|
||||
static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
|
||||
|
||||
@@ -4198,6 +4198,7 @@ static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
|
||||
}
|
||||
|
||||
static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
|
||||
|
||||
@@ -5279,6 +5279,7 @@ static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
|
||||
}
|
||||
|
||||
static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
|
||||
|
||||
@@ -787,7 +787,7 @@ static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
|
||||
|
||||
}
|
||||
|
||||
static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset,
|
||||
static void gfx_v9_0_sriov_wreg(struct amdgpu_device *adev, u32 offset,
|
||||
u32 v, u32 acc_flags, u32 hwip)
|
||||
{
|
||||
if ((acc_flags & AMDGPU_REGS_RLC) &&
|
||||
@@ -2090,6 +2090,7 @@ static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
|
||||
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
|
||||
}
|
||||
|
||||
static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
|
||||
@@ -5150,7 +5151,7 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
|
||||
.reset = gfx_v9_0_rlc_reset,
|
||||
.start = gfx_v9_0_rlc_start,
|
||||
.update_spm_vmid = gfx_v9_0_update_spm_vmid,
|
||||
.rlcg_wreg = gfx_v9_0_rlcg_wreg,
|
||||
.sriov_wreg = gfx_v9_0_sriov_wreg,
|
||||
.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
|
||||
};
|
||||
|
||||
|
||||
@@ -85,7 +85,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde[] = {
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCI_CNTL_3, 0xff, 0x20),
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* This shader is used to clear VGPRS and LDS, and also write the input
|
||||
* pattern into the write back buffer, which will be used by driver to
|
||||
* check whether all SIMDs have been covered.
|
||||
@@ -206,7 +206,7 @@ const struct soc15_reg_entry vgpr_init_regs_aldebaran[] = {
|
||||
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* The below shaders are used to clear SGPRS, and also write the input
|
||||
* pattern into the write back buffer. The first two dispatch should be
|
||||
* scheduled simultaneously which make sure that all SGPRS could be
|
||||
@@ -302,7 +302,7 @@ const struct soc15_reg_entry sgpr96_init_regs_aldebaran[] = {
|
||||
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
|
||||
};
|
||||
|
||||
/**
|
||||
/*
|
||||
* This shader is used to clear the uninitiated sgprs after the above
|
||||
* two dispatches, because of hardware feature, dispath 0 couldn't clear
|
||||
* top hole sgprs. Therefore need 4 waves per SIMD to cover these sgprs
|
||||
|
||||
@@ -75,9 +75,8 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
|
||||
max_physical_node_id = 7;
|
||||
break;
|
||||
case CHIP_ALDEBARAN:
|
||||
/* just using duplicates for Aldebaran support, revisit later */
|
||||
max_num_physical_nodes = 8;
|
||||
max_physical_node_id = 7;
|
||||
max_num_physical_nodes = 16;
|
||||
max_physical_node_id = 15;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
||||
@@ -93,6 +93,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
|
||||
struct amdgpu_iv_entry *entry)
|
||||
{
|
||||
bool retry_fault = !!(entry->src_data[1] & 0x80);
|
||||
bool write_fault = !!(entry->src_data[1] & 0x20);
|
||||
struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
|
||||
struct amdgpu_task_info task_info;
|
||||
uint32_t status = 0;
|
||||
@@ -121,7 +122,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
|
||||
/* Try to handle the recoverable page faults by filling page
|
||||
* tables
|
||||
*/
|
||||
if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
|
||||
if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -810,6 +811,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
|
||||
case CHIP_DIMGREY_CAVEFISH:
|
||||
case CHIP_BEIGE_GOBY:
|
||||
case CHIP_YELLOW_CARP:
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
default:
|
||||
adev->gmc.gart_size = 512ULL << 20;
|
||||
break;
|
||||
@@ -879,6 +881,7 @@ static int gmc_v10_0_sw_init(void *handle)
|
||||
case CHIP_DIMGREY_CAVEFISH:
|
||||
case CHIP_BEIGE_GOBY:
|
||||
case CHIP_YELLOW_CARP:
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
adev->num_vmhubs = 2;
|
||||
/*
|
||||
* To fulfill 4-level page support,
|
||||
@@ -996,6 +999,7 @@ static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
|
||||
case CHIP_DIMGREY_CAVEFISH:
|
||||
case CHIP_BEIGE_GOBY:
|
||||
case CHIP_YELLOW_CARP:
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
||||
@@ -53,7 +53,9 @@
|
||||
#include "mmhub_v1_7.h"
|
||||
#include "umc_v6_1.h"
|
||||
#include "umc_v6_0.h"
|
||||
#include "umc_v6_7.h"
|
||||
#include "hdp_v4_0.h"
|
||||
#include "mca_v3_0.h"
|
||||
|
||||
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
|
||||
|
||||
@@ -505,6 +507,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
|
||||
struct amdgpu_iv_entry *entry)
|
||||
{
|
||||
bool retry_fault = !!(entry->src_data[1] & 0x80);
|
||||
bool write_fault = !!(entry->src_data[1] & 0x20);
|
||||
uint32_t status = 0, cid = 0, rw = 0;
|
||||
struct amdgpu_task_info task_info;
|
||||
struct amdgpu_vmhub *hub;
|
||||
@@ -535,7 +538,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
|
||||
/* Try to handle the recoverable page faults by filling page
|
||||
* tables
|
||||
*/
|
||||
if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
|
||||
if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -1168,6 +1171,18 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
|
||||
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
|
||||
adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
|
||||
break;
|
||||
case CHIP_ALDEBARAN:
|
||||
adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM;
|
||||
adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
|
||||
adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
|
||||
adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
|
||||
if (!adev->gmc.xgmi.connected_to_cpu)
|
||||
adev->umc.ras_funcs = &umc_v6_7_ras_funcs;
|
||||
if (1 & adev->smuio.funcs->get_die_id(adev))
|
||||
adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
|
||||
else
|
||||
adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@@ -1216,6 +1231,18 @@ static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
|
||||
adev->hdp.ras_funcs = &hdp_v4_0_ras_funcs;
|
||||
}
|
||||
|
||||
static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
switch (adev->asic_type) {
|
||||
case CHIP_ALDEBARAN:
|
||||
if (!adev->gmc.xgmi.connected_to_cpu)
|
||||
adev->mca.funcs = &mca_v3_0_funcs;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int gmc_v9_0_early_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
@@ -1237,6 +1264,7 @@ static int gmc_v9_0_early_init(void *handle)
|
||||
gmc_v9_0_set_mmhub_ras_funcs(adev);
|
||||
gmc_v9_0_set_gfxhub_funcs(adev);
|
||||
gmc_v9_0_set_hdp_ras_funcs(adev);
|
||||
gmc_v9_0_set_mca_funcs(adev);
|
||||
|
||||
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
|
||||
adev->gmc.shared_aperture_end =
|
||||
@@ -1448,6 +1476,8 @@ static int gmc_v9_0_sw_init(void *handle)
|
||||
adev->gfxhub.funcs->init(adev);
|
||||
|
||||
adev->mmhub.funcs->init(adev);
|
||||
if (adev->mca.funcs)
|
||||
adev->mca.funcs->init(adev);
|
||||
|
||||
spin_lock_init(&adev->gmc.invalidate_lock);
|
||||
|
||||
|
||||
125
drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
Normal file
125
drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
Normal file
@@ -0,0 +1,125 @@
|
||||
/*
|
||||
* Copyright 2021 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
#include "amdgpu_ras.h"
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_mca.h"
|
||||
|
||||
#define smnMCMP0_STATUST0 0x03830408
|
||||
#define smnMCMP1_STATUST0 0x03b30408
|
||||
#define smnMCMPIO_STATUST0 0x0c930408
|
||||
|
||||
|
||||
static void mca_v3_0_mp0_query_ras_error_count(struct amdgpu_device *adev,
|
||||
void *ras_error_status)
|
||||
{
|
||||
amdgpu_mca_query_ras_error_count(adev,
|
||||
smnMCMP0_STATUST0,
|
||||
ras_error_status);
|
||||
}
|
||||
|
||||
static int mca_v3_0_mp0_ras_late_init(struct amdgpu_device *adev)
|
||||
{
|
||||
return amdgpu_mca_ras_late_init(adev, &adev->mca.mp0);
|
||||
}
|
||||
|
||||
static void mca_v3_0_mp0_ras_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
amdgpu_mca_ras_fini(adev, &adev->mca.mp0);
|
||||
}
|
||||
|
||||
const struct amdgpu_mca_ras_funcs mca_v3_0_mp0_ras_funcs = {
|
||||
.ras_late_init = mca_v3_0_mp0_ras_late_init,
|
||||
.ras_fini = mca_v3_0_mp0_ras_fini,
|
||||
.query_ras_error_count = mca_v3_0_mp0_query_ras_error_count,
|
||||
.query_ras_error_address = NULL,
|
||||
.ras_block = AMDGPU_RAS_BLOCK__MP0,
|
||||
.sysfs_name = "mp0_err_count",
|
||||
};
|
||||
|
||||
static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev,
|
||||
void *ras_error_status)
|
||||
{
|
||||
amdgpu_mca_query_ras_error_count(adev,
|
||||
smnMCMP1_STATUST0,
|
||||
ras_error_status);
|
||||
}
|
||||
|
||||
static int mca_v3_0_mp1_ras_late_init(struct amdgpu_device *adev)
|
||||
{
|
||||
return amdgpu_mca_ras_late_init(adev, &adev->mca.mp1);
|
||||
}
|
||||
|
||||
static void mca_v3_0_mp1_ras_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
amdgpu_mca_ras_fini(adev, &adev->mca.mp1);
|
||||
}
|
||||
|
||||
const struct amdgpu_mca_ras_funcs mca_v3_0_mp1_ras_funcs = {
|
||||
.ras_late_init = mca_v3_0_mp1_ras_late_init,
|
||||
.ras_fini = mca_v3_0_mp1_ras_fini,
|
||||
.query_ras_error_count = mca_v3_0_mp1_query_ras_error_count,
|
||||
.query_ras_error_address = NULL,
|
||||
.ras_block = AMDGPU_RAS_BLOCK__MP1,
|
||||
.sysfs_name = "mp1_err_count",
|
||||
};
|
||||
|
||||
static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev,
|
||||
void *ras_error_status)
|
||||
{
|
||||
amdgpu_mca_query_ras_error_count(adev,
|
||||
smnMCMPIO_STATUST0,
|
||||
ras_error_status);
|
||||
}
|
||||
|
||||
static int mca_v3_0_mpio_ras_late_init(struct amdgpu_device *adev)
|
||||
{
|
||||
return amdgpu_mca_ras_late_init(adev, &adev->mca.mpio);
|
||||
}
|
||||
|
||||
static void mca_v3_0_mpio_ras_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
amdgpu_mca_ras_fini(adev, &adev->mca.mpio);
|
||||
}
|
||||
|
||||
const struct amdgpu_mca_ras_funcs mca_v3_0_mpio_ras_funcs = {
|
||||
.ras_late_init = mca_v3_0_mpio_ras_late_init,
|
||||
.ras_fini = mca_v3_0_mpio_ras_fini,
|
||||
.query_ras_error_count = mca_v3_0_mpio_query_ras_error_count,
|
||||
.query_ras_error_address = NULL,
|
||||
.ras_block = AMDGPU_RAS_BLOCK__MPIO,
|
||||
.sysfs_name = "mpio_err_count",
|
||||
};
|
||||
|
||||
|
||||
static void mca_v3_0_init(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_mca *mca = &adev->mca;
|
||||
|
||||
mca->mp0.ras_funcs = &mca_v3_0_mp0_ras_funcs;
|
||||
mca->mp1.ras_funcs = &mca_v3_0_mp1_ras_funcs;
|
||||
mca->mpio.ras_funcs = &mca_v3_0_mpio_ras_funcs;
|
||||
}
|
||||
|
||||
const struct amdgpu_mca_funcs mca_v3_0_funcs = {
|
||||
.init = mca_v3_0_init,
|
||||
};
|
||||
26
drivers/gpu/drm/amd/amdgpu/mca_v3_0.h
Normal file
26
drivers/gpu/drm/amd/amdgpu/mca_v3_0.h
Normal file
@@ -0,0 +1,26 @@
|
||||
/*
|
||||
* Copyright (C) 2021 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
|
||||
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#ifndef __MCA_V3_0_H__
|
||||
#define __MCA_V3_0_H__
|
||||
|
||||
extern const struct amdgpu_mca_funcs mca_v3_0_funcs;
|
||||
|
||||
#endif
|
||||
@@ -24,9 +24,7 @@
|
||||
#ifndef __MMSCH_V1_0_H__
|
||||
#define __MMSCH_V1_0_H__
|
||||
|
||||
#define MMSCH_VERSION_MAJOR 1
|
||||
#define MMSCH_VERSION_MINOR 0
|
||||
#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
|
||||
#define MMSCH_VERSION 0x1
|
||||
|
||||
enum mmsch_v1_0_command_type {
|
||||
MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
|
||||
|
||||
@@ -96,7 +96,11 @@ static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
|
||||
|
||||
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
|
||||
{
|
||||
int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;
|
||||
int r;
|
||||
uint64_t timeout, now;
|
||||
|
||||
now = (uint64_t)ktime_to_ms(ktime_get());
|
||||
timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;
|
||||
|
||||
do {
|
||||
r = xgpu_nv_mailbox_rcv_msg(adev, event);
|
||||
@@ -104,8 +108,8 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
|
||||
return 0;
|
||||
|
||||
msleep(10);
|
||||
timeout -= 10;
|
||||
} while (timeout > 1);
|
||||
now = (uint64_t)ktime_to_ms(ktime_get());
|
||||
} while (timeout > now);
|
||||
|
||||
|
||||
return -ETIME;
|
||||
@@ -149,9 +153,10 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
|
||||
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
|
||||
enum idh_request req)
|
||||
{
|
||||
int r;
|
||||
int r, retry = 1;
|
||||
enum idh_event event = -1;
|
||||
|
||||
send_request:
|
||||
xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
|
||||
|
||||
switch (req) {
|
||||
@@ -170,6 +175,9 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
|
||||
if (event != -1) {
|
||||
r = xgpu_nv_poll_msg(adev, event);
|
||||
if (r) {
|
||||
if (retry++ < 2)
|
||||
goto send_request;
|
||||
|
||||
if (req != IDH_REQ_GPU_INIT_DATA) {
|
||||
pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
|
||||
return r;
|
||||
@@ -279,6 +287,8 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
|
||||
amdgpu_virt_fini_data_exchange(adev);
|
||||
atomic_set(&adev->in_gpu_reset, 1);
|
||||
|
||||
xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
|
||||
|
||||
do {
|
||||
if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
|
||||
goto flr_done;
|
||||
|
||||
@@ -37,7 +37,8 @@ enum idh_request {
|
||||
IDH_REQ_GPU_RESET_ACCESS,
|
||||
IDH_REQ_GPU_INIT_DATA,
|
||||
|
||||
IDH_LOG_VF_ERROR = 200,
|
||||
IDH_LOG_VF_ERROR = 200,
|
||||
IDH_READY_TO_RESET = 201,
|
||||
};
|
||||
|
||||
enum idh_event {
|
||||
|
||||
@@ -508,6 +508,26 @@ static void nbio_v2_3_apply_l1_link_width_reconfig_wa(struct amdgpu_device *adev
|
||||
WREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL, reg_data);
|
||||
}
|
||||
|
||||
static void nbio_v2_3_clear_doorbell_interrupt(struct amdgpu_device *adev)
|
||||
{
|
||||
uint32_t reg, reg_data;
|
||||
|
||||
if (adev->asic_type != CHIP_SIENNA_CICHLID)
|
||||
return;
|
||||
|
||||
reg = RREG32_SOC15(NBIO, 0, mmBIF_RB_CNTL);
|
||||
|
||||
/* Clear Interrupt Status
|
||||
*/
|
||||
if ((reg & BIF_RB_CNTL__RB_ENABLE_MASK) == 0) {
|
||||
reg = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
|
||||
if (reg & BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS_MASK) {
|
||||
reg_data = 1 << BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR__SHIFT;
|
||||
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, reg_data);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
|
||||
.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
|
||||
.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
|
||||
@@ -531,4 +551,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
|
||||
.program_aspm = nbio_v2_3_program_aspm,
|
||||
.apply_lc_spc_mode_wa = nbio_v2_3_apply_lc_spc_mode_wa,
|
||||
.apply_l1_link_width_reconfig_wa = nbio_v2_3_apply_l1_link_width_reconfig_wa,
|
||||
.clear_doorbell_interrupt = nbio_v2_3_clear_doorbell_interrupt,
|
||||
};
|
||||
|
||||
@@ -85,6 +85,11 @@
|
||||
#define mmRCC_DEV0_EPF0_STRAP0_ALDE 0x0015
|
||||
#define mmRCC_DEV0_EPF0_STRAP0_ALDE_BASE_IDX 2
|
||||
|
||||
#define mmBIF_DOORBELL_INT_CNTL_ALDE 0x3878
|
||||
#define mmBIF_DOORBELL_INT_CNTL_ALDE_BASE_IDX 2
|
||||
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE__SHIFT 0x18
|
||||
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE_MASK 0x01000000L
|
||||
|
||||
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
|
||||
void *ras_error_status);
|
||||
|
||||
@@ -346,14 +351,21 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
|
||||
struct ras_err_data err_data = {0, 0, 0, NULL};
|
||||
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
|
||||
|
||||
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
|
||||
if (adev->asic_type == CHIP_ALDEBARAN)
|
||||
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE);
|
||||
else
|
||||
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
|
||||
|
||||
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
|
||||
BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
|
||||
/* driver has to clear the interrupt status when bif ring is disabled */
|
||||
bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
|
||||
BIF_DOORBELL_INT_CNTL,
|
||||
RAS_CNTLR_INTERRUPT_CLEAR, 1);
|
||||
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
|
||||
if (adev->asic_type == CHIP_ALDEBARAN)
|
||||
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE, bif_doorbell_intr_cntl);
|
||||
else
|
||||
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
|
||||
|
||||
if (!ras->disable_ras_err_cnt_harvest) {
|
||||
/*
|
||||
@@ -372,13 +384,13 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
|
||||
"errors detected in %s block, "
|
||||
"no user action is needed.\n",
|
||||
obj->err_data.ce_count,
|
||||
adev->nbio.ras_if->name);
|
||||
ras_block_str(adev->nbio.ras_if->block));
|
||||
|
||||
if (err_data.ue_count)
|
||||
dev_info(adev->dev, "%ld uncorrectable hardware "
|
||||
"errors detected in %s block\n",
|
||||
obj->err_data.ue_count,
|
||||
adev->nbio.ras_if->name);
|
||||
ras_block_str(adev->nbio.ras_if->block));
|
||||
}
|
||||
|
||||
dev_info(adev->dev, "RAS controller interrupt triggered "
|
||||
@@ -395,14 +407,22 @@ static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_d
|
||||
{
|
||||
uint32_t bif_doorbell_intr_cntl;
|
||||
|
||||
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
|
||||
if (adev->asic_type == CHIP_ALDEBARAN)
|
||||
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE);
|
||||
else
|
||||
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
|
||||
|
||||
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
|
||||
BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
|
||||
/* driver has to clear the interrupt status when bif ring is disabled */
|
||||
bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
|
||||
BIF_DOORBELL_INT_CNTL,
|
||||
RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
|
||||
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
|
||||
|
||||
if (adev->asic_type == CHIP_ALDEBARAN)
|
||||
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE, bif_doorbell_intr_cntl);
|
||||
else
|
||||
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
|
||||
|
||||
amdgpu_ras_global_ras_isr(adev);
|
||||
}
|
||||
@@ -572,7 +592,11 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
|
||||
static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
|
||||
bool enable)
|
||||
{
|
||||
WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
|
||||
if (adev->asic_type == CHIP_ALDEBARAN)
|
||||
WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL_ALDE,
|
||||
DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
|
||||
else
|
||||
WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
|
||||
DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
|
||||
}
|
||||
|
||||
|
||||
@@ -58,7 +58,7 @@
|
||||
#include "jpeg_v2_0.h"
|
||||
#include "vcn_v3_0.h"
|
||||
#include "jpeg_v3_0.h"
|
||||
#include "dce_virtual.h"
|
||||
#include "amdgpu_vkms.h"
|
||||
#include "mes_v10_1.h"
|
||||
#include "mxgpu_nv.h"
|
||||
#include "smuio_v11_0.h"
|
||||
@@ -666,6 +666,9 @@ static int nv_reg_base_init(struct amdgpu_device *adev)
|
||||
case CHIP_YELLOW_CARP:
|
||||
yellow_carp_reg_base_init(adev);
|
||||
break;
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
cyan_skillfish_reg_base_init(adev);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -682,7 +685,10 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (adev->flags & AMD_IS_APU) {
|
||||
if (adev->asic_type == CHIP_CYAN_SKILLFISH) {
|
||||
adev->nbio.funcs = &nbio_v2_3_funcs;
|
||||
adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
|
||||
} else if (adev->flags & AMD_IS_APU) {
|
||||
adev->nbio.funcs = &nbio_v7_2_funcs;
|
||||
adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
|
||||
} else {
|
||||
@@ -715,7 +721,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
|
||||
!amdgpu_sriov_vf(adev))
|
||||
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
|
||||
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
|
||||
#if defined(CONFIG_DRM_AMD_DC)
|
||||
else if (amdgpu_device_has_dc_support(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dm_ip_block);
|
||||
@@ -743,7 +749,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
|
||||
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
|
||||
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
|
||||
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
|
||||
#if defined(CONFIG_DRM_AMD_DC)
|
||||
else if (amdgpu_device_has_dc_support(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dm_ip_block);
|
||||
@@ -773,7 +779,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
|
||||
is_support_sw_smu(adev))
|
||||
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
|
||||
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
|
||||
#if defined(CONFIG_DRM_AMD_DC)
|
||||
else if (amdgpu_device_has_dc_support(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dm_ip_block);
|
||||
@@ -796,7 +802,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
|
||||
is_support_sw_smu(adev))
|
||||
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
|
||||
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
|
||||
#if defined(CONFIG_DRM_AMD_DC)
|
||||
else if (amdgpu_device_has_dc_support(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dm_ip_block);
|
||||
@@ -817,7 +823,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
|
||||
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
|
||||
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
|
||||
#if defined(CONFIG_DRM_AMD_DC)
|
||||
else if (amdgpu_device_has_dc_support(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dm_ip_block);
|
||||
@@ -837,7 +843,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
|
||||
is_support_sw_smu(adev))
|
||||
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
|
||||
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
|
||||
#if defined(CONFIG_DRM_AMD_DC)
|
||||
else if (amdgpu_device_has_dc_support(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dm_ip_block);
|
||||
@@ -859,7 +865,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
|
||||
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
|
||||
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
|
||||
#if defined(CONFIG_DRM_AMD_DC)
|
||||
else if (amdgpu_device_has_dc_support(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dm_ip_block);
|
||||
@@ -877,11 +883,11 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
|
||||
amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
|
||||
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
|
||||
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
|
||||
#if defined(CONFIG_DRM_AMD_DC)
|
||||
else if (amdgpu_device_has_dc_support(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dm_ip_block);
|
||||
@@ -889,6 +895,20 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
|
||||
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
|
||||
break;
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
|
||||
if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
|
||||
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
|
||||
amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
|
||||
}
|
||||
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
|
||||
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -1241,6 +1261,11 @@ static int nv_common_early_init(void *handle)
|
||||
else
|
||||
adev->external_rev_id = adev->rev_id + 0x01;
|
||||
break;
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
adev->cg_flags = 0;
|
||||
adev->pg_flags = 0;
|
||||
adev->external_rev_id = adev->rev_id + 0x82;
|
||||
break;
|
||||
default:
|
||||
/* FIXME: not supported yet */
|
||||
return -EINVAL;
|
||||
|
||||
@@ -38,5 +38,6 @@ void vangogh_reg_base_init(struct amdgpu_device *adev);
|
||||
int dimgrey_cavefish_reg_base_init(struct amdgpu_device *adev);
|
||||
int beige_goby_reg_base_init(struct amdgpu_device *adev);
|
||||
int yellow_carp_reg_base_init(struct amdgpu_device *adev);
|
||||
int cyan_skillfish_reg_base_init(struct amdgpu_device *adev);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -84,29 +84,29 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
|
||||
|
||||
ta_hdr = (const struct ta_firmware_header_v1_0 *)
|
||||
adev->psp.ta_fw->data;
|
||||
adev->psp.ta_hdcp_ucode_version =
|
||||
le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
|
||||
adev->psp.ta_hdcp_ucode_size =
|
||||
le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
|
||||
adev->psp.ta_hdcp_start_addr =
|
||||
adev->psp.hdcp.feature_version =
|
||||
le32_to_cpu(ta_hdr->hdcp.fw_version);
|
||||
adev->psp.hdcp.size_bytes =
|
||||
le32_to_cpu(ta_hdr->hdcp.size_bytes);
|
||||
adev->psp.hdcp.start_addr =
|
||||
(uint8_t *)ta_hdr +
|
||||
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
|
||||
|
||||
adev->psp.ta_dtm_ucode_version =
|
||||
le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
|
||||
adev->psp.ta_dtm_ucode_size =
|
||||
le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
|
||||
adev->psp.ta_dtm_start_addr =
|
||||
(uint8_t *)adev->psp.ta_hdcp_start_addr +
|
||||
le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
|
||||
adev->psp.dtm.feature_version =
|
||||
le32_to_cpu(ta_hdr->dtm.fw_version);
|
||||
adev->psp.dtm.size_bytes =
|
||||
le32_to_cpu(ta_hdr->dtm.size_bytes);
|
||||
adev->psp.dtm.start_addr =
|
||||
(uint8_t *)adev->psp.hdcp.start_addr +
|
||||
le32_to_cpu(ta_hdr->dtm.offset_bytes);
|
||||
|
||||
adev->psp.ta_securedisplay_ucode_version =
|
||||
le32_to_cpu(ta_hdr->ta_securedisplay_ucode_version);
|
||||
adev->psp.ta_securedisplay_ucode_size =
|
||||
le32_to_cpu(ta_hdr->ta_securedisplay_size_bytes);
|
||||
adev->psp.ta_securedisplay_start_addr =
|
||||
(uint8_t *)adev->psp.ta_hdcp_start_addr +
|
||||
le32_to_cpu(ta_hdr->ta_securedisplay_offset_bytes);
|
||||
adev->psp.securedisplay.feature_version =
|
||||
le32_to_cpu(ta_hdr->securedisplay.fw_version);
|
||||
adev->psp.securedisplay.size_bytes =
|
||||
le32_to_cpu(ta_hdr->securedisplay.size_bytes);
|
||||
adev->psp.securedisplay.start_addr =
|
||||
(uint8_t *)adev->psp.hdcp.start_addr +
|
||||
le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
|
||||
|
||||
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
|
||||
}
|
||||
|
||||
@@ -80,6 +80,9 @@ MODULE_FIRMWARE("amdgpu/beige_goby_ta.bin");
|
||||
/* For large FW files the time to complete can be very long */
|
||||
#define USBC_PD_POLLING_LIMIT_S 240
|
||||
|
||||
/* Read USB-PD from LFB */
|
||||
#define GFX_CMD_USB_PD_USE_LFB 0x480
|
||||
|
||||
static int psp_v11_0_init_microcode(struct psp_context *psp)
|
||||
{
|
||||
struct amdgpu_device *adev = psp->adev;
|
||||
@@ -148,15 +151,15 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
|
||||
goto out2;
|
||||
|
||||
ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
|
||||
adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
|
||||
adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
|
||||
adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
|
||||
adev->psp.xgmi.feature_version = le32_to_cpu(ta_hdr->xgmi.fw_version);
|
||||
adev->psp.xgmi.size_bytes = le32_to_cpu(ta_hdr->xgmi.size_bytes);
|
||||
adev->psp.xgmi.start_addr = (uint8_t *)ta_hdr +
|
||||
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
|
||||
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
|
||||
adev->psp.ta_ras_ucode_version = le32_to_cpu(ta_hdr->ta_ras_ucode_version);
|
||||
adev->psp.ta_ras_ucode_size = le32_to_cpu(ta_hdr->ta_ras_size_bytes);
|
||||
adev->psp.ta_ras_start_addr = (uint8_t *)adev->psp.ta_xgmi_start_addr +
|
||||
le32_to_cpu(ta_hdr->ta_ras_offset_bytes);
|
||||
adev->psp.ras.feature_version = le32_to_cpu(ta_hdr->ras.fw_version);
|
||||
adev->psp.ras.size_bytes = le32_to_cpu(ta_hdr->ras.size_bytes);
|
||||
adev->psp.ras.start_addr = (uint8_t *)adev->psp.xgmi.start_addr +
|
||||
le32_to_cpu(ta_hdr->ras.offset_bytes);
|
||||
}
|
||||
break;
|
||||
case CHIP_NAVI10:
|
||||
@@ -183,17 +186,17 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
|
||||
goto out2;
|
||||
|
||||
ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
|
||||
adev->psp.ta_hdcp_ucode_version = le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
|
||||
adev->psp.ta_hdcp_ucode_size = le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
|
||||
adev->psp.ta_hdcp_start_addr = (uint8_t *)ta_hdr +
|
||||
adev->psp.hdcp.feature_version = le32_to_cpu(ta_hdr->hdcp.fw_version);
|
||||
adev->psp.hdcp.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes);
|
||||
adev->psp.hdcp.start_addr = (uint8_t *)ta_hdr +
|
||||
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
|
||||
|
||||
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
|
||||
|
||||
adev->psp.ta_dtm_ucode_version = le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
|
||||
adev->psp.ta_dtm_ucode_size = le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
|
||||
adev->psp.ta_dtm_start_addr = (uint8_t *)adev->psp.ta_hdcp_start_addr +
|
||||
le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
|
||||
adev->psp.dtm.feature_version = le32_to_cpu(ta_hdr->dtm.fw_version);
|
||||
adev->psp.dtm.size_bytes = le32_to_cpu(ta_hdr->dtm.size_bytes);
|
||||
adev->psp.dtm.start_addr = (uint8_t *)adev->psp.hdcp.start_addr +
|
||||
le32_to_cpu(ta_hdr->dtm.offset_bytes);
|
||||
}
|
||||
break;
|
||||
case CHIP_SIENNA_CICHLID:
|
||||
@@ -284,7 +287,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
|
||||
return ret;
|
||||
|
||||
/* Copy PSP KDB binary to memory */
|
||||
psp_copy_fw(psp, psp->kdb_start_addr, psp->kdb_bin_size);
|
||||
psp_copy_fw(psp, psp->kdb.start_addr, psp->kdb.size_bytes);
|
||||
|
||||
/* Provide the PSP KDB to bootloader */
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
|
||||
@@ -315,7 +318,7 @@ static int psp_v11_0_bootloader_load_spl(struct psp_context *psp)
|
||||
return ret;
|
||||
|
||||
/* Copy PSP SPL binary to memory */
|
||||
psp_copy_fw(psp, psp->spl_start_addr, psp->spl_bin_size);
|
||||
psp_copy_fw(psp, psp->spl.start_addr, psp->spl.size_bytes);
|
||||
|
||||
/* Provide the PSP SPL to bootloader */
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
|
||||
@@ -346,7 +349,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
|
||||
return ret;
|
||||
|
||||
/* Copy PSP System Driver binary to memory */
|
||||
psp_copy_fw(psp, psp->sys_start_addr, psp->sys_bin_size);
|
||||
psp_copy_fw(psp, psp->sys.start_addr, psp->sys.size_bytes);
|
||||
|
||||
/* Provide the sys driver to bootloader */
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
|
||||
@@ -380,7 +383,7 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
|
||||
return ret;
|
||||
|
||||
/* Copy Secure OS binary to PSP memory */
|
||||
psp_copy_fw(psp, psp->sos_start_addr, psp->sos_bin_size);
|
||||
psp_copy_fw(psp, psp->sos.start_addr, psp->sos.size_bytes);
|
||||
|
||||
/* Provide the PSP secure OS to bootloader */
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
|
||||
@@ -753,44 +756,26 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
|
||||
}
|
||||
|
||||
static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, dma_addr_t dma_addr)
|
||||
static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc_addr)
|
||||
{
|
||||
struct amdgpu_device *adev = psp->adev;
|
||||
uint32_t reg_status;
|
||||
int ret, i = 0;
|
||||
|
||||
/* Write lower 32-bit address of the PD Controller FW */
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, lower_32_bits(dma_addr));
|
||||
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
|
||||
0x80000000, 0x80000000, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Fireup interrupt so PSP can pick up the lower address */
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x800000);
|
||||
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
|
||||
0x80000000, 0x80000000, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35);
|
||||
|
||||
if ((reg_status & 0xFFFF) != 0) {
|
||||
DRM_ERROR("Lower address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = %02x...\n",
|
||||
reg_status & 0xFFFF);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* Write upper 32-bit address of the PD Controller FW */
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, upper_32_bits(dma_addr));
|
||||
/*
|
||||
* LFB address which is aligned to 1MB address and has to be
|
||||
* right-shifted by 20 so that LFB address can be passed on a 32-bit C2P
|
||||
* register
|
||||
*/
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
|
||||
|
||||
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
|
||||
0x80000000, 0x80000000, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Fireup interrupt so PSP can pick up the upper address */
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x4000000);
|
||||
/* Fireup interrupt so PSP can pick up the address */
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, (GFX_CMD_USB_PD_USE_LFB << 16));
|
||||
|
||||
/* FW load takes very long time */
|
||||
do {
|
||||
@@ -806,7 +791,7 @@ static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, dma_addr_t dma_add
|
||||
done:
|
||||
|
||||
if ((reg_status & 0xFFFF) != 0) {
|
||||
DRM_ERROR("Upper address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = x%04x\n",
|
||||
DRM_ERROR("Address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = 0x%04x\n",
|
||||
reg_status & 0xFFFF);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user