Merge tag 'drm-xe-next-2025-11-14' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next

Driver Changes:
- Avoid TOCTOU when monitoring throttle reasons (Lucas)
- Add/extend workaround (Nitin)
- SRIOV migration work / plumbing (Michal Wajdeczko, Michal Winiarski, Lukasz)
- Drop debug flag requirement for VF resource fixup
- Fix MTL vm_max_level (Rodrigo)
- Changes around TILE_ADDR_RANGE for platform compatibility (Fei, Lucas)
- Add runtime registers for GFX ver >= 35 (Piotr)
- Kerneldoc fix (Kriish)
- Rework pcode error mapping (Lucas)
- Allow lockdown of the PF (Michal)
- Eliminate GuC code caching of some frequency values (Sk)
- Improvements around forcewake referencing (Matt Roper)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/aRcJOrisG2qPbucE@fedora
@@ -174,9 +174,11 @@ xe-$(CONFIG_PCI_IOV) += \
	xe_lmtt_2l.o \
	xe_lmtt_ml.o \
	xe_pci_sriov.o \
	xe_sriov_packet.o \
	xe_sriov_pf.o \
	xe_sriov_pf_control.o \
	xe_sriov_pf_debugfs.o \
	xe_sriov_pf_migration.o \
	xe_sriov_pf_provision.o \
	xe_sriov_pf_service.o \
	xe_sriov_pf_sysfs.o \
@@ -101,7 +101,6 @@

#define XE2_LMEM_CFG			XE_REG(0x48b0)

#define XEHP_TILE_ADDR_RANGE(_idx)	XE_REG_MCR(0x4900 + (_idx) * 4)
#define XEHP_FLAT_CCS_BASE_ADDR		XE_REG_MCR(0x4910)
#define XEHP_FLAT_CCS_PTR		REG_GENMASK(31, 8)
@@ -40,6 +40,8 @@

#define STOLEN_RESERVED			XE_REG(0x1082c0)
#define WOPCM_SIZE_MASK			REG_GENMASK64(9, 7)

#define SG_TILE_ADDR_RANGE(_idx)	XE_REG(0x1083a0 + (_idx) * 4)

#define MTL_RP_STATE_CAP		XE_REG(0x138000)

#define MTL_GT_RPA_FREQUENCY		XE_REG(0x138008)
208	drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c (new file)
@@ -0,0 +1,208 @@
// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include <kunit/static_stub.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>

#include "xe_kunit_helpers.h"
#include "xe_pci_test.h"

#define TEST_MAX_VFS 63

static void pf_set_admin_mode(struct xe_device *xe, bool enable)
{
	/* should match logic of xe_sriov_pf_admin_only() */
	xe->info.probe_display = !enable;
	KUNIT_EXPECT_EQ(kunit_get_current_test(), enable, xe_sriov_pf_admin_only(xe));
}

static const void *num_vfs_gen_param(struct kunit *test, const void *prev, char *desc)
{
	unsigned long next = 1 + (unsigned long)prev;

	if (next > TEST_MAX_VFS)
		return NULL;
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%lu VF%s",
		 next, str_plural(next));
	return (void *)next;
}

static int pf_gt_config_test_init(struct kunit *test)
{
	struct xe_pci_fake_data fake = {
		.sriov_mode = XE_SRIOV_MODE_PF,
		.platform = XE_TIGERLAKE, /* any random platform with SR-IOV */
		.subplatform = XE_SUBPLATFORM_NONE,
	};
	struct xe_device *xe;
	struct xe_gt *gt;

	test->priv = &fake;
	xe_kunit_helper_xe_device_test_init(test);

	xe = test->priv;
	KUNIT_ASSERT_TRUE(test, IS_SRIOV_PF(xe));

	gt = xe_root_mmio_gt(xe);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gt);
	test->priv = gt;

	/* pretend it can support up to 63 VFs */
	xe->sriov.pf.device_total_vfs = TEST_MAX_VFS;
	xe->sriov.pf.driver_max_vfs = TEST_MAX_VFS;
	KUNIT_ASSERT_EQ(test, xe_sriov_pf_get_totalvfs(xe), 63);

	pf_set_admin_mode(xe, false);
	KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0);

	/* more sanity checks */
	KUNIT_EXPECT_EQ(test, GUC_ID_MAX + 1, SZ_64K);
	KUNIT_EXPECT_EQ(test, GUC_NUM_DOORBELLS, SZ_256);

	return 0;
}

static void fair_contexts_1vf(struct kunit *test)
{
	struct xe_gt *gt = test->priv;
	struct xe_device *xe = gt_to_xe(gt);

	pf_set_admin_mode(xe, false);
	KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));
	KUNIT_EXPECT_EQ(test, SZ_32K, pf_profile_fair_ctxs(gt, 1));

	pf_set_admin_mode(xe, true);
	KUNIT_ASSERT_TRUE(test, xe_sriov_pf_admin_only(xe));
	KUNIT_EXPECT_EQ(test, SZ_64K - SZ_1K, pf_profile_fair_ctxs(gt, 1));
}

static void fair_contexts(struct kunit *test)
{
	unsigned int num_vfs = (unsigned long)test->param_value;
	struct xe_gt *gt = test->priv;
	struct xe_device *xe = gt_to_xe(gt);

	pf_set_admin_mode(xe, false);
	KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));

	KUNIT_EXPECT_TRUE(test, is_power_of_2(pf_profile_fair_ctxs(gt, num_vfs)));
	KUNIT_EXPECT_GT(test, GUC_ID_MAX, num_vfs * pf_profile_fair_ctxs(gt, num_vfs));

	if (num_vfs > 31)
		KUNIT_ASSERT_EQ(test, SZ_1K, pf_profile_fair_ctxs(gt, num_vfs));
	else if (num_vfs > 15)
		KUNIT_ASSERT_EQ(test, SZ_2K, pf_profile_fair_ctxs(gt, num_vfs));
	else if (num_vfs > 7)
		KUNIT_ASSERT_EQ(test, SZ_4K, pf_profile_fair_ctxs(gt, num_vfs));
	else if (num_vfs > 3)
		KUNIT_ASSERT_EQ(test, SZ_8K, pf_profile_fair_ctxs(gt, num_vfs));
	else if (num_vfs > 1)
		KUNIT_ASSERT_EQ(test, SZ_16K, pf_profile_fair_ctxs(gt, num_vfs));
	else
		KUNIT_ASSERT_EQ(test, SZ_32K, pf_profile_fair_ctxs(gt, num_vfs));
}

static void fair_doorbells_1vf(struct kunit *test)
{
	struct xe_gt *gt = test->priv;
	struct xe_device *xe = gt_to_xe(gt);

	pf_set_admin_mode(xe, false);
	KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));
	KUNIT_EXPECT_EQ(test, 128, pf_profile_fair_dbs(gt, 1));

	pf_set_admin_mode(xe, true);
	KUNIT_ASSERT_TRUE(test, xe_sriov_pf_admin_only(xe));
	KUNIT_EXPECT_EQ(test, 240, pf_profile_fair_dbs(gt, 1));
}

static void fair_doorbells(struct kunit *test)
{
	unsigned int num_vfs = (unsigned long)test->param_value;
	struct xe_gt *gt = test->priv;
	struct xe_device *xe = gt_to_xe(gt);

	pf_set_admin_mode(xe, false);
	KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));

	KUNIT_EXPECT_TRUE(test, is_power_of_2(pf_profile_fair_dbs(gt, num_vfs)));
	KUNIT_EXPECT_GE(test, GUC_NUM_DOORBELLS, (num_vfs + 1) * pf_profile_fair_dbs(gt, num_vfs));

	if (num_vfs > 31)
		KUNIT_ASSERT_EQ(test, SZ_4, pf_profile_fair_dbs(gt, num_vfs));
	else if (num_vfs > 15)
		KUNIT_ASSERT_EQ(test, SZ_8, pf_profile_fair_dbs(gt, num_vfs));
	else if (num_vfs > 7)
		KUNIT_ASSERT_EQ(test, SZ_16, pf_profile_fair_dbs(gt, num_vfs));
	else if (num_vfs > 3)
		KUNIT_ASSERT_EQ(test, SZ_32, pf_profile_fair_dbs(gt, num_vfs));
	else if (num_vfs > 1)
		KUNIT_ASSERT_EQ(test, SZ_64, pf_profile_fair_dbs(gt, num_vfs));
	else
		KUNIT_ASSERT_EQ(test, SZ_128, pf_profile_fair_dbs(gt, num_vfs));
}

static void fair_ggtt_1vf(struct kunit *test)
{
	struct xe_gt *gt = test->priv;
	struct xe_device *xe = gt_to_xe(gt);

	pf_set_admin_mode(xe, false);
	KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));
	KUNIT_EXPECT_EQ(test, SZ_2G, pf_profile_fair_ggtt(gt, 1));

	pf_set_admin_mode(xe, true);
	KUNIT_ASSERT_TRUE(test, xe_sriov_pf_admin_only(xe));
	KUNIT_EXPECT_EQ(test, SZ_2G + SZ_1G + SZ_512M, pf_profile_fair_ggtt(gt, 1));
}

static void fair_ggtt(struct kunit *test)
{
	unsigned int num_vfs = (unsigned long)test->param_value;
	struct xe_gt *gt = test->priv;
	struct xe_device *xe = gt_to_xe(gt);
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 shareable = SZ_2G + SZ_1G + SZ_512M;

	pf_set_admin_mode(xe, false);
	KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));

	KUNIT_EXPECT_TRUE(test, IS_ALIGNED(pf_profile_fair_ggtt(gt, num_vfs), alignment));
	KUNIT_EXPECT_GE(test, shareable, num_vfs * pf_profile_fair_ggtt(gt, num_vfs));

	if (num_vfs > 56)
		KUNIT_ASSERT_EQ(test, SZ_64M - SZ_8M, pf_profile_fair_ggtt(gt, num_vfs));
	else if (num_vfs > 28)
		KUNIT_ASSERT_EQ(test, SZ_64M, pf_profile_fair_ggtt(gt, num_vfs));
	else if (num_vfs > 14)
		KUNIT_ASSERT_EQ(test, SZ_128M, pf_profile_fair_ggtt(gt, num_vfs));
	else if (num_vfs > 7)
		KUNIT_ASSERT_EQ(test, SZ_256M, pf_profile_fair_ggtt(gt, num_vfs));
	else if (num_vfs > 3)
		KUNIT_ASSERT_EQ(test, SZ_512M, pf_profile_fair_ggtt(gt, num_vfs));
	else if (num_vfs > 1)
		KUNIT_ASSERT_EQ(test, SZ_1G, pf_profile_fair_ggtt(gt, num_vfs));
	else
		KUNIT_ASSERT_EQ(test, SZ_2G, pf_profile_fair_ggtt(gt, num_vfs));
}

static struct kunit_case pf_gt_config_test_cases[] = {
	KUNIT_CASE(fair_contexts_1vf),
	KUNIT_CASE(fair_doorbells_1vf),
	KUNIT_CASE(fair_ggtt_1vf),
	KUNIT_CASE_PARAM(fair_contexts, num_vfs_gen_param),
	KUNIT_CASE_PARAM(fair_doorbells, num_vfs_gen_param),
	KUNIT_CASE_PARAM(fair_ggtt, num_vfs_gen_param),
	{}
};

static struct kunit_suite pf_gt_config_suite = {
	.name = "pf_gt_config",
	.test_cases = pf_gt_config_test_cases,
	.init = pf_gt_config_test_init,
};

kunit_test_suite(pf_gt_config_suite);
@@ -49,6 +49,7 @@ struct xe_eu_stall_data_stream {
	wait_queue_head_t poll_wq;
	size_t data_record_size;
	size_t per_xecore_buf_size;
	unsigned int fw_ref;

	struct xe_gt *gt;
	struct xe_bo *bo;
@@ -660,13 +661,12 @@ static int xe_eu_stall_stream_enable(struct xe_eu_stall_data_stream *stream)
 	struct per_xecore_buf *xecore_buf;
 	struct xe_gt *gt = stream->gt;
 	u16 group, instance;
-	unsigned int fw_ref;
 	int xecore;
 
 	/* Take runtime pm ref and forcewake to disable RC6 */
 	xe_pm_runtime_get(gt_to_xe(gt));
-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_RENDER);
-	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_RENDER)) {
+	stream->fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_RENDER);
+	if (!xe_force_wake_ref_has_domain(stream->fw_ref, XE_FW_RENDER)) {
 		xe_gt_err(gt, "Failed to get RENDER forcewake\n");
 		xe_pm_runtime_put(gt_to_xe(gt));
 		return -ETIMEDOUT;
@@ -832,7 +832,7 @@ static int xe_eu_stall_disable_locked(struct xe_eu_stall_data_stream *stream)
 	xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
 				  _MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
 
-	xe_force_wake_put(gt_to_fw(gt), XE_FW_RENDER);
+	xe_force_wake_put(gt_to_fw(gt), stream->fw_ref);
 	xe_pm_runtime_put(gt_to_xe(gt));
 
 	return 0;
@@ -52,7 +52,22 @@ enum xe_force_wake_domains {
 };
 
 /**
- * struct xe_force_wake_domain - Xe force wake domains
+ * struct xe_force_wake_domain - Xe force wake power domain
+ *
+ * Represents an individual device-internal power domain. The driver must
+ * ensure the power domain is awake before accessing registers or other
+ * hardware functionality that is part of the power domain. Since different
+ * driver threads may access hardware units simultaneously, a reference count
+ * is used to ensure that the domain remains awake as long as any software
+ * is using the part of the hardware covered by the power domain.
+ *
+ * Hardware provides a register interface to allow the driver to request
+ * wake/sleep of power domains, although in most cases the actual action of
+ * powering the hardware up/down is handled by firmware (and may be subject to
+ * requirements and constraints outside of the driver's visibility) so the
+ * driver needs to wait for an acknowledgment that a wake request has been
+ * acted upon before accessing the parts of the hardware that reside within the
+ * power domain.
 */
 struct xe_force_wake_domain {
 	/** @id: domain force wake id */

@@ -70,7 +85,14 @@ struct xe_force_wake_domain {
 };
 
 /**
- * struct xe_force_wake - Xe force wake
+ * struct xe_force_wake - Xe force wake collection
+ *
+ * Represents a collection of related power domains (struct
+ * xe_force_wake_domain) associated with a subunit of the device.
+ *
+ * Currently only used for GT power domains (where the term "forcewake" is used
+ * in the hardware documentation), although the interface could be extended to
+ * power wells in other parts of the hardware in the future.
 */
 struct xe_force_wake {
 	/** @gt: back pointers to GT */
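A minimal sketch of the reference-counted wake/put pattern the documentation above describes, mirroring how xe_eu_stall_stream_enable()/xe_eu_stall_disable_locked() use it elsewhere in this pull (error handling trimmed; the surrounding driver context and the choice of the RENDER domain are assumptions for illustration):

	unsigned int fw_ref;

	/* Request the RENDER power domain and remember the returned reference. */
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_RENDER);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_RENDER)) {
		/* Wake was not acknowledged; drop whatever domains we did get. */
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	/* ... access registers that live inside the RENDER domain ... */

	/* Release exactly the domains covered by the saved reference. */
	xe_force_wake_put(gt_to_fw(gt), fw_ref);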
@@ -151,6 +151,14 @@ static void xe_ggtt_set_pte_and_flush(struct xe_ggtt *ggtt, u64 addr, u64 pte)
	ggtt_update_access_counter(ggtt);
}

static u64 xe_ggtt_get_pte(struct xe_ggtt *ggtt, u64 addr)
{
	xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK));
	xe_tile_assert(ggtt->tile, addr < ggtt->size);

	return readq(&ggtt->gsm[addr >> XE_PTE_SHIFT]);
}

static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
{
	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
@@ -233,16 +241,19 @@ void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
static const struct xe_ggtt_pt_ops xelp_pt_ops = {
	.pte_encode_flags = xelp_ggtt_pte_flags,
	.ggtt_set_pte = xe_ggtt_set_pte,
	.ggtt_get_pte = xe_ggtt_get_pte,
};

static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
	.pte_encode_flags = xelpg_ggtt_pte_flags,
	.ggtt_set_pte = xe_ggtt_set_pte,
	.ggtt_get_pte = xe_ggtt_get_pte,
};

static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
	.pte_encode_flags = xelpg_ggtt_pte_flags,
	.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
	.ggtt_get_pte = xe_ggtt_get_pte,
};

static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u32 reserved)
@@ -697,6 +708,20 @@ bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node)
	return drm_mm_node_allocated(&node->base);
}

/**
 * xe_ggtt_node_pt_size() - Get the size of page table entries needed to map a GGTT node.
 * @node: the &xe_ggtt_node
 *
 * Return: GGTT node page table entries size in bytes.
 */
size_t xe_ggtt_node_pt_size(const struct xe_ggtt_node *node)
{
	if (!node)
		return 0;

	return node->base.size / XE_PAGE_SIZE * sizeof(u64);
}

/**
 * xe_ggtt_map_bo - Map the BO into GGTT
 * @ggtt: the &xe_ggtt where node will be mapped
@@ -930,6 +955,85 @@ void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid)
	xe_ggtt_assign_locked(node->ggtt, &node->base, vfid);
	mutex_unlock(&node->ggtt->lock);
}

/**
 * xe_ggtt_node_save() - Save a &xe_ggtt_node to a buffer.
 * @node: the &xe_ggtt_node to be saved
 * @dst: destination buffer
 * @size: destination buffer size in bytes
 * @vfid: VF identifier
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_save(struct xe_ggtt_node *node, void *dst, size_t size, u16 vfid)
{
	struct xe_ggtt *ggtt;
	u64 start, end;
	u64 *buf = dst;
	u64 pte;

	if (!node)
		return -ENOENT;

	guard(mutex)(&node->ggtt->lock);

	if (xe_ggtt_node_pt_size(node) != size)
		return -EINVAL;

	ggtt = node->ggtt;
	start = node->base.start;
	end = start + node->base.size - 1;

	while (start < end) {
		pte = ggtt->pt_ops->ggtt_get_pte(ggtt, start);
		if (vfid != u64_get_bits(pte, GGTT_PTE_VFID))
			return -EPERM;

		*buf++ = u64_replace_bits(pte, 0, GGTT_PTE_VFID);
		start += XE_PAGE_SIZE;
	}

	return 0;
}

/**
 * xe_ggtt_node_load() - Load a &xe_ggtt_node from a buffer.
 * @node: the &xe_ggtt_node to be loaded
 * @src: source buffer
 * @size: source buffer size in bytes
 * @vfid: VF identifier
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_load(struct xe_ggtt_node *node, const void *src, size_t size, u16 vfid)
{
	u64 vfid_pte = xe_encode_vfid_pte(vfid);
	const u64 *buf = src;
	struct xe_ggtt *ggtt;
	u64 start, end;

	if (!node)
		return -ENOENT;

	guard(mutex)(&node->ggtt->lock);

	if (xe_ggtt_node_pt_size(node) != size)
		return -EINVAL;

	ggtt = node->ggtt;
	start = node->base.start;
	end = start + node->base.size - 1;

	while (start < end) {
		vfid_pte = u64_replace_bits(*buf++, vfid, GGTT_PTE_VFID);
		ggtt->pt_ops->ggtt_set_pte(ggtt, start, vfid_pte);
		start += XE_PAGE_SIZE;
	}
	xe_ggtt_invalidate(ggtt);

	return 0;
}

#endif
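A short usage sketch for the two new helpers above: query the required buffer size with xe_ggtt_node_pt_size(), save the VF-owned PTEs into it, and later write them back. The buffer management shown here is illustrative only; the real callers are the PF migration paths further down in this pull.

	size_t size = xe_ggtt_node_pt_size(node);
	void *buf;
	int err;

	if (!size)
		return -ENODATA;

	buf = kvmalloc(size, GFP_KERNEL);	/* illustrative allocation */
	if (!buf)
		return -ENOMEM;

	err = xe_ggtt_node_save(node, buf, size, vfid);	/* PTEs stored with the VFID field cleared */
	if (!err)
		err = xe_ggtt_node_load(node, buf, size, vfid);	/* VFID re-applied on write-back */

	kvfree(buf);
	return err;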
/**
@@ -29,6 +29,7 @@ int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
			       u32 size, u32 align, u32 mm_flags);
void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate);
bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node);
size_t xe_ggtt_node_pt_size(const struct xe_ggtt_node *node);
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
		    struct xe_bo *bo, u16 pat_index);
void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo);
@@ -43,6 +44,8 @@ u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer

#ifdef CONFIG_PCI_IOV
void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid);
int xe_ggtt_node_save(struct xe_ggtt_node *node, void *dst, size_t size, u16 vfid);
int xe_ggtt_node_load(struct xe_ggtt_node *node, const void *src, size_t size, u16 vfid);
#endif

#ifndef CONFIG_LOCKDEP
@@ -78,6 +78,8 @@ struct xe_ggtt_pt_ops {
	u64 (*pte_encode_flags)(struct xe_bo *bo, u16 pat_index);
	/** @ggtt_set_pte: Directly write into GGTT's PTE */
	void (*ggtt_set_pte)(struct xe_ggtt *ggtt, u64 addr, u64 pte);
	/** @ggtt_get_pte: Directly read from GGTT's PTE */
	u64 (*ggtt_get_pte)(struct xe_ggtt *ggtt, u64 addr);
};

#endif
@@ -9,6 +9,7 @@
#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"

#include "regs/xe_gtt_defs.h"
#include "regs/xe_guc_regs.h"

#include "xe_bo.h"
@@ -697,6 +698,22 @@ static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
	return fair;
}

static u64 pf_profile_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
{
	bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt));
	u64 shareable = ALIGN_DOWN(GUC_GGTT_TOP, SZ_512M);
	u64 alignment = pf_get_ggtt_alignment(gt);

	if (admin_only_pf && num_vfs == 1)
		return ALIGN_DOWN(shareable, alignment);

	/* need to hardcode due to ~512M of GGTT being reserved */
	if (num_vfs > 56)
		return SZ_64M - SZ_8M;

	return rounddown_pow_of_two(shareable / num_vfs);
}
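Worked example of the profile above, using the same numbers the new KUnit test at the top of this pull encodes: with GUC_GGTT_TOP aligned down to 512M the shareable space is SZ_2G + SZ_1G + SZ_512M = 3.5 GiB. For num_vfs = 3 that gives rounddown_pow_of_two(3.5 GiB / 3) = 1 GiB per VF, for num_vfs = 16 it gives 128 MiB, and for num_vfs > 56 the value is hardcoded to SZ_64M - SZ_8M = 56 MiB so that up to 63 VFs still fit once the ~512 MiB reservation is accounted for.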
/**
 * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
 * @gt: the &xe_gt (can't be media)
@@ -710,6 +727,7 @@ static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 profile = pf_profile_fair_ggtt(gt, num_vfs);
	u64 fair;

	xe_gt_assert(gt, vfid);

@@ -723,9 +741,71 @@ int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
	if (!fair)
		return -ENOSPC;

	fair = min(fair, profile);
	if (fair < profile)
		xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %llu vs %llu)\n",
				 "GGTT", fair, profile);

	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
}

/**
 * xe_gt_sriov_pf_config_ggtt_save() - Save a VF provisioned GGTT data into a buffer.
 * @gt: the &xe_gt
 * @vfid: VF identifier (can't be 0)
 * @buf: the GGTT data destination buffer (or NULL to query the buf size)
 * @size: the size of the buffer (or 0 to query the buf size)
 *
 * This function can only be called on PF.
 *
 * Return: size of the buffer needed to save GGTT data if querying,
 * 0 on successful save or a negative error code on failure.
 */
ssize_t xe_gt_sriov_pf_config_ggtt_save(struct xe_gt *gt, unsigned int vfid,
					void *buf, size_t size)
{
	struct xe_ggtt_node *node;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !(!buf ^ !size));

	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));

	node = pf_pick_vf_config(gt, vfid)->ggtt_region;

	if (!buf)
		return xe_ggtt_node_pt_size(node);

	return xe_ggtt_node_save(node, buf, size, vfid);
}

/**
 * xe_gt_sriov_pf_config_ggtt_restore() - Restore a VF provisioned GGTT data from a buffer.
 * @gt: the &xe_gt
 * @vfid: VF identifier (can't be 0)
 * @buf: the GGTT data source buffer
 * @size: the size of the buffer
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
				       const void *buf, size_t size)
{
	struct xe_ggtt_node *node;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid);

	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));

	node = pf_pick_vf_config(gt, vfid)->ggtt_region;

	return xe_ggtt_node_load(node, buf, size, vfid);
}
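A sketch of the query-then-save calling convention documented above (PF only; the buffer handling is illustrative, the real user is the PF migration code added by this series):

	ssize_t size = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, NULL, 0);	/* query required size */
	void *buf;
	int err;

	if (size <= 0)
		return size ?: -ENODATA;

	buf = kvzalloc(size, GFP_KERNEL);	/* illustrative allocation */
	if (!buf)
		return -ENOMEM;

	err = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, buf, size);	/* returns 0 on success */
	if (!err)
		err = xe_gt_sriov_pf_config_ggtt_restore(gt, vfid, buf, size);

	kvfree(buf);
	return err;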
static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
{
	/* XXX: preliminary */

@@ -985,6 +1065,16 @@ int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
				 "GuC context IDs", no_unit, n, err);
}

static u32 pf_profile_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
	bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt));

	if (admin_only_pf && num_vfs == 1)
		return ALIGN_DOWN(GUC_ID_MAX, SZ_1K);

	return rounddown_pow_of_two(GUC_ID_MAX / num_vfs);
}

static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;

@@ -1017,6 +1107,7 @@ static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u32 profile = pf_profile_fair_ctxs(gt, num_vfs);
	u32 fair;

	xe_gt_assert(gt, vfid);

@@ -1029,6 +1120,11 @@ int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
	if (!fair)
		return -ENOSPC;

	fair = min(fair, profile);
	if (fair < profile)
		xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %u vs %u)\n",
				 "GuC context IDs", fair, profile);

	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
}

@@ -1233,6 +1329,17 @@ int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
				 "GuC doorbell IDs", no_unit, n, err);
}

static u32 pf_profile_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
	bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt));

	/* XXX: preliminary */
	if (admin_only_pf && num_vfs == 1)
		return GUC_NUM_DOORBELLS - SZ_16;

	return rounddown_pow_of_two(GUC_NUM_DOORBELLS / (num_vfs + 1));
}

static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;

@@ -1265,6 +1372,7 @@ static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs)
{
	u32 profile = pf_profile_fair_dbs(gt, num_vfs);
	u32 fair;

	xe_gt_assert(gt, vfid);

@@ -1277,6 +1385,11 @@ int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
	if (!fair)
		return -ENOSPC;

	fair = min(fair, profile);
	if (fair < profile)
		xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %u vs %u)\n",
				 "GuC doorbell IDs", fair, profile);

	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
}

@@ -1602,6 +1715,32 @@ int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
			 "LMEM", n, err);
}

static struct xe_bo *pf_get_vf_config_lmem_obj(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->lmem_obj;
}

/**
 * xe_gt_sriov_pf_config_get_lmem_obj() - Take a reference to the struct &xe_bo backing VF LMEM.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be 0)
 *
 * This function can only be called on PF.
 * The caller is responsible for calling xe_bo_put() on the returned object.
 *
 * Return: pointer to struct &xe_bo backing VF LMEM (if any).
 */
struct xe_bo *xe_gt_sriov_pf_config_get_lmem_obj(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, vfid);

	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));

	return xe_bo_get(pf_get_vf_config_lmem_obj(gt, vfid));
}

static u64 pf_query_free_lmem(struct xe_gt *gt)
{
	struct xe_tile *tile = gt->tile;

@@ -2793,3 +2932,7 @@ int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_prin

	return 0;
}

#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_gt_sriov_pf_config_kunit.c"
#endif
@@ -36,6 +36,7 @@ int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size
int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs,
					u64 size);
struct xe_bo *xe_gt_sriov_pf_config_get_lmem_obj(struct xe_gt *gt, unsigned int vfid);

u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 exec_quantum);

@@ -71,6 +72,11 @@ ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *bu
int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
				  const void *buf, size_t size);

ssize_t xe_gt_sriov_pf_config_ggtt_save(struct xe_gt *gt, unsigned int vfid,
					void *buf, size_t size);
int xe_gt_sriov_pf_config_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
				       const void *buf, size_t size);

bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid);

int xe_gt_sriov_pf_config_init(struct xe_gt *gt);
@@ -18,7 +18,10 @@
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_sriov.h"
#include "xe_sriov_packet.h"
#include "xe_sriov_packet_types.h"
#include "xe_sriov_pf_control.h"
#include "xe_sriov_pf_migration.h"
#include "xe_sriov_pf_service.h"
#include "xe_tile.h"
@@ -181,9 +184,20 @@ static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit)
	CASE2STR(PAUSE_SEND_PAUSE);
	CASE2STR(PAUSE_WAIT_GUC);
	CASE2STR(PAUSE_GUC_DONE);
	CASE2STR(PAUSE_SAVE_GUC);
	CASE2STR(PAUSE_FAILED);
	CASE2STR(PAUSED);
	CASE2STR(SAVE_WIP);
	CASE2STR(SAVE_PROCESS_DATA);
	CASE2STR(SAVE_WAIT_DATA);
	CASE2STR(SAVE_DATA_DONE);
	CASE2STR(SAVE_FAILED);
	CASE2STR(SAVED);
	CASE2STR(RESTORE_WIP);
	CASE2STR(RESTORE_PROCESS_DATA);
	CASE2STR(RESTORE_WAIT_DATA);
	CASE2STR(RESTORE_DATA_DONE);
	CASE2STR(RESTORE_FAILED);
	CASE2STR(RESTORED);
	CASE2STR(RESUME_WIP);
	CASE2STR(RESUME_SEND_RESUME);
	CASE2STR(RESUME_FAILED);
@@ -208,6 +222,8 @@ static unsigned long pf_get_default_timeout(enum xe_gt_sriov_control_bits bit)
	case XE_GT_SRIOV_STATE_FLR_WIP:
	case XE_GT_SRIOV_STATE_FLR_RESET_CONFIG:
		return 5 * HZ;
	case XE_GT_SRIOV_STATE_RESTORE_WIP:
		return 20 * HZ;
	default:
		return HZ;
	}
@@ -225,7 +241,7 @@ static unsigned long *pf_peek_vf_state(struct xe_gt *gt, unsigned int vfid)
 {
 	struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid);
 
-	return &cs->state;
+	return cs->state;
 }

static bool pf_check_vf_state(struct xe_gt *gt, unsigned int vfid,
||||
@@ -329,6 +345,8 @@ static void pf_exit_vf_mismatch(struct xe_gt *gt, unsigned int vfid)
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED);
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED);
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED);
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED);
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED);
|
||||
}
|
||||
|
||||
#define pf_enter_vf_state_machine_bug(gt, vfid) ({ \
|
||||
@@ -359,6 +377,8 @@ static void pf_queue_vf(struct xe_gt *gt, unsigned int vfid)
|
||||
|
||||
static void pf_exit_vf_flr_wip(struct xe_gt *gt, unsigned int vfid);
|
||||
static void pf_exit_vf_stop_wip(struct xe_gt *gt, unsigned int vfid);
|
||||
static void pf_exit_vf_save_wip(struct xe_gt *gt, unsigned int vfid);
|
||||
static void pf_exit_vf_restore_wip(struct xe_gt *gt, unsigned int vfid);
|
||||
static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid);
|
||||
static void pf_exit_vf_resume_wip(struct xe_gt *gt, unsigned int vfid);
|
||||
|
||||
@@ -380,6 +400,8 @@ static void pf_exit_vf_wip(struct xe_gt *gt, unsigned int vfid)
|
||||
|
||||
pf_exit_vf_flr_wip(gt, vfid);
|
||||
pf_exit_vf_stop_wip(gt, vfid);
|
||||
pf_exit_vf_save_wip(gt, vfid);
|
||||
pf_exit_vf_restore_wip(gt, vfid);
|
||||
pf_exit_vf_pause_wip(gt, vfid);
|
||||
pf_exit_vf_resume_wip(gt, vfid);
|
||||
|
||||
@@ -399,6 +421,8 @@ static void pf_enter_vf_ready(struct xe_gt *gt, unsigned int vfid)
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED);
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED);
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED);
|
||||
pf_exit_vf_mismatch(gt, vfid);
|
||||
pf_exit_vf_wip(gt, vfid);
|
||||
}
|
||||
@@ -429,8 +453,7 @@ static void pf_enter_vf_ready(struct xe_gt *gt, unsigned int vfid)
|
||||
* : PAUSE_GUC_DONE o-----restart
|
||||
* : | :
|
||||
* : | o---<--busy :
|
||||
* : v / / :
|
||||
* : PAUSE_SAVE_GUC :
|
||||
* : / :
|
||||
* : / :
|
||||
* : / :
|
||||
* :....o..............o...............o...........:
|
||||
@@ -450,7 +473,6 @@ static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid)
|
||||
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE);
|
||||
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC);
|
||||
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE);
|
||||
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -481,41 +503,12 @@ static void pf_enter_vf_pause_rejected(struct xe_gt *gt, unsigned int vfid)
|
||||
pf_enter_vf_pause_failed(gt, vfid);
|
||||
}
|
||||
|
||||
static void pf_enter_vf_pause_save_guc(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC))
|
||||
pf_enter_vf_state_machine_bug(gt, vfid);
|
||||
}
|
||||
|
||||
static bool pf_exit_vf_pause_save_guc(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
int err;
|
||||
|
||||
if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC))
|
||||
return false;
|
||||
|
||||
err = xe_gt_sriov_pf_migration_save_guc_state(gt, vfid);
|
||||
if (err) {
|
||||
/* retry if busy */
|
||||
if (err == -EBUSY) {
|
||||
pf_enter_vf_pause_save_guc(gt, vfid);
|
||||
return true;
|
||||
}
|
||||
/* give up on error */
|
||||
if (err == -EIO)
|
||||
pf_enter_vf_mismatch(gt, vfid);
|
||||
}
|
||||
|
||||
pf_enter_vf_pause_completed(gt, vfid);
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool pf_exit_vf_pause_guc_done(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE))
|
||||
return false;
|
||||
|
||||
pf_enter_vf_pause_save_guc(gt, vfid);
|
||||
pf_enter_vf_pause_completed(gt, vfid);
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -675,6 +668,8 @@ static void pf_enter_vf_resumed(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED);
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED);
|
||||
pf_exit_vf_mismatch(gt, vfid);
|
||||
pf_exit_vf_wip(gt, vfid);
|
||||
}
|
||||
@@ -753,6 +748,16 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) {
|
||||
xe_gt_sriov_dbg(gt, "VF%u save is in progress!\n", vfid);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) {
|
||||
xe_gt_sriov_dbg(gt, "VF%u restore is in progress!\n", vfid);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (!pf_enter_vf_resume_wip(gt, vfid)) {
|
||||
xe_gt_sriov_dbg(gt, "VF%u resume already in progress!\n", vfid);
|
||||
return -EALREADY;
|
||||
@@ -776,6 +781,562 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
|
||||
return -ECANCELED;
|
||||
}
|
||||
|
||||
/**
|
||||
* DOC: The VF SAVE state machine
|
||||
*
|
||||
* SAVE extends the PAUSED state.
|
||||
*
|
||||
* The VF SAVE state machine looks like::
|
||||
*
|
||||
* ....PAUSED....................................................
|
||||
* : :
|
||||
* : (O)<---------o :
|
||||
* : | \ :
|
||||
* : save (SAVED) (SAVE_FAILED) :
|
||||
* : | ^ ^ :
|
||||
* : | | | :
|
||||
* : ....V...............o...........o......SAVE_WIP......... :
|
||||
* : : | | | : :
|
||||
* : : | empty | : :
|
||||
* : : | | | : :
|
||||
* : : | | | : :
|
||||
* : : | DATA_DONE | : :
|
||||
* : : | ^ | : :
|
||||
* : : | | error : :
|
||||
* : : | no_data / : :
|
||||
* : : | / / : :
|
||||
* : : | / / : :
|
||||
* : : | / / : :
|
||||
* : : o---------->PROCESS_DATA<----consume : :
|
||||
* : : \ \ : :
|
||||
* : : \ \ : :
|
||||
* : : \ \ : :
|
||||
* : : ring_full----->WAIT_DATA : :
|
||||
* : : : :
|
||||
* : :......................................................: :
|
||||
* :............................................................:
|
||||
*
|
||||
* For the full state machine view, see `The VF state machine`_.
|
||||
*/
|
||||
|
||||
static void pf_exit_vf_save_wip(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) {
|
||||
xe_gt_sriov_pf_migration_ring_free(gt, vfid);
|
||||
|
||||
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA);
|
||||
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA);
|
||||
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE);
|
||||
}
|
||||
}
|
||||
|
||||
static void pf_enter_vf_saved(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED))
|
||||
pf_enter_vf_state_machine_bug(gt, vfid);
|
||||
|
||||
xe_gt_sriov_dbg(gt, "VF%u saved!\n", vfid);
|
||||
|
||||
pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
|
||||
pf_exit_vf_mismatch(gt, vfid);
|
||||
pf_exit_vf_wip(gt, vfid);
|
||||
}
|
||||
|
||||
static void pf_enter_vf_save_failed(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED))
|
||||
pf_enter_vf_state_machine_bug(gt, vfid);
|
||||
|
||||
wake_up_all(xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid));
|
||||
|
||||
pf_exit_vf_wip(gt, vfid);
|
||||
}
|
||||
|
||||
static int pf_handle_vf_save_data(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (xe_gt_sriov_pf_migration_save_data_pending(gt, vfid,
|
||||
XE_SRIOV_PACKET_TYPE_GUC)) {
|
||||
ret = xe_gt_sriov_pf_migration_guc_save(gt, vfid);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
xe_gt_sriov_pf_migration_save_data_complete(gt, vfid,
|
||||
XE_SRIOV_PACKET_TYPE_GUC);
|
||||
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
if (xe_gt_sriov_pf_migration_save_data_pending(gt, vfid,
|
||||
XE_SRIOV_PACKET_TYPE_GGTT)) {
|
||||
ret = xe_gt_sriov_pf_migration_ggtt_save(gt, vfid);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
xe_gt_sriov_pf_migration_save_data_complete(gt, vfid,
|
||||
XE_SRIOV_PACKET_TYPE_GGTT);
|
||||
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
if (xe_gt_sriov_pf_migration_save_data_pending(gt, vfid,
|
||||
XE_SRIOV_PACKET_TYPE_MMIO)) {
|
||||
ret = xe_gt_sriov_pf_migration_mmio_save(gt, vfid);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
xe_gt_sriov_pf_migration_save_data_complete(gt, vfid,
|
||||
XE_SRIOV_PACKET_TYPE_MMIO);
|
||||
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
if (xe_gt_sriov_pf_migration_save_data_pending(gt, vfid,
|
||||
XE_SRIOV_PACKET_TYPE_VRAM)) {
|
||||
ret = xe_gt_sriov_pf_migration_vram_save(gt, vfid);
|
||||
if (ret == -EAGAIN)
|
||||
return -EAGAIN;
|
||||
else if (ret)
|
||||
return ret;
|
||||
|
||||
xe_gt_sriov_pf_migration_save_data_complete(gt, vfid,
|
||||
XE_SRIOV_PACKET_TYPE_VRAM);
|
||||
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool pf_handle_vf_save(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA))
|
||||
return false;
|
||||
|
||||
if (xe_gt_sriov_pf_migration_ring_full(gt, vfid)) {
|
||||
pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA);
|
||||
return true;
|
||||
}
|
||||
|
||||
ret = pf_handle_vf_save_data(gt, vfid);
|
||||
if (ret == -EAGAIN)
|
||||
pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA);
|
||||
else if (ret)
|
||||
pf_enter_vf_save_failed(gt, vfid);
|
||||
else
|
||||
pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void pf_exit_vf_save_wait_data(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA))
|
||||
return;
|
||||
|
||||
pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA);
|
||||
pf_queue_vf(gt, vfid);
|
||||
}
|
||||
|
||||
static bool pf_enter_vf_save_wip(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) {
|
||||
xe_gt_sriov_pf_migration_save_init(gt, vfid);
|
||||
pf_enter_vf_wip(gt, vfid);
|
||||
pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA);
|
||||
pf_queue_vf(gt, vfid);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_gt_sriov_pf_control_check_save_data_done() - Check if all save migration data was produced.
|
||||
* @gt: the &xe_gt
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* This function is for PF only.
|
||||
*
|
||||
* Return: true if all migration data was produced, false otherwise.
|
||||
*/
|
||||
bool xe_gt_sriov_pf_control_check_save_data_done(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
return pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_gt_sriov_pf_control_check_save_failed() - Check if save processing has failed.
|
||||
* @gt: the &xe_gt
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* This function is for PF only.
|
||||
*
|
||||
* Return: true if save processing failed, false otherwise.
|
||||
*/
|
||||
bool xe_gt_sriov_pf_control_check_save_failed(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
return pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_gt_sriov_pf_control_process_save_data() - Queue VF save migration data processing.
|
||||
* @gt: the &xe_gt
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* This function is for PF only.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_gt_sriov_pf_control_process_save_data(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (!pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED))
|
||||
return -EIO;
|
||||
|
||||
pf_exit_vf_save_wait_data(gt, vfid);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_gt_sriov_pf_control_trigger_save_vf() - Start an SR-IOV VF migration data save sequence.
|
||||
* @gt: the &xe_gt
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* This function is for PF only.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_gt_sriov_pf_control_trigger_save_vf(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) {
|
||||
xe_gt_sriov_dbg(gt, "VF%u is stopped!\n", vfid);
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) {
|
||||
xe_gt_sriov_dbg(gt, "VF%u is not paused!\n", vfid);
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) {
|
||||
xe_gt_sriov_dbg(gt, "VF%u restore is in progress!\n", vfid);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (!pf_enter_vf_save_wip(gt, vfid)) {
|
||||
xe_gt_sriov_dbg(gt, "VF%u save already in progress!\n", vfid);
|
||||
return -EALREADY;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_gt_sriov_pf_control_finish_save_vf() - Complete a VF migration data save sequence.
|
||||
* @gt: the &xe_gt
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* This function is for PF only.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_gt_sriov_pf_control_finish_save_vf(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE)) {
|
||||
xe_gt_sriov_err(gt, "VF%u save is still in progress!\n", vfid);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE);
|
||||
pf_enter_vf_saved(gt, vfid);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* DOC: The VF RESTORE state machine
|
||||
*
|
||||
* RESTORE extends the PAUSED state.
|
||||
*
|
||||
* The VF RESTORE state machine looks like::
|
||||
*
|
||||
* ....PAUSED....................................................
|
||||
* : :
|
||||
* : (O)<---------o :
|
||||
* : | \ :
|
||||
* : restore (RESTORED) (RESTORE_FAILED) :
|
||||
* : | ^ ^ :
|
||||
* : | | | :
|
||||
* : ....V...............o...........o......RESTORE_WIP...... :
|
||||
* : : | | | : :
|
||||
* : : | empty | : :
|
||||
* : : | | | : :
|
||||
* : : | | | : :
|
||||
* : : | DATA_DONE | : :
|
||||
* : : | ^ | : :
|
||||
* : : | | error : :
|
||||
* : : | trailer / : :
|
||||
* : : | / / : :
|
||||
* : : | / / : :
|
||||
* : : | / / : :
|
||||
* : : o---------->PROCESS_DATA<----produce : :
|
||||
* : : \ \ : :
|
||||
* : : \ \ : :
|
||||
* : : \ \ : :
|
||||
* : : ring_empty---->WAIT_DATA : :
|
||||
* : : : :
|
||||
* : :......................................................: :
|
||||
* :............................................................:
|
||||
*
|
||||
* For the full state machine view, see `The VF state machine`_.
|
||||
*/
|
||||
|
||||
static void pf_exit_vf_restore_wip(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) {
|
||||
xe_gt_sriov_pf_migration_ring_free(gt, vfid);
|
||||
|
||||
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA);
|
||||
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA);
|
||||
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_DATA_DONE);
|
||||
}
|
||||
}
|
||||
|
||||
static void pf_enter_vf_restored(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED))
|
||||
pf_enter_vf_state_machine_bug(gt, vfid);
|
||||
|
||||
xe_gt_sriov_dbg(gt, "VF%u restored!\n", vfid);
|
||||
|
||||
pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
|
||||
pf_exit_vf_mismatch(gt, vfid);
|
||||
pf_exit_vf_wip(gt, vfid);
|
||||
}
|
||||
|
||||
static void pf_enter_vf_restore_failed(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED))
|
||||
pf_enter_vf_state_machine_bug(gt, vfid);
|
||||
|
||||
wake_up_all(xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid));
|
||||
|
||||
pf_exit_vf_wip(gt, vfid);
|
||||
}
|
||||
|
||||
static int pf_handle_vf_restore_data(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
struct xe_sriov_packet *data = xe_gt_sriov_pf_migration_restore_consume(gt, vfid);
|
||||
int ret = 0;
|
||||
|
||||
switch (data->hdr.type) {
|
||||
case XE_SRIOV_PACKET_TYPE_GGTT:
|
||||
ret = xe_gt_sriov_pf_migration_ggtt_restore(gt, vfid, data);
|
||||
break;
|
||||
case XE_SRIOV_PACKET_TYPE_MMIO:
|
||||
ret = xe_gt_sriov_pf_migration_mmio_restore(gt, vfid, data);
|
||||
break;
|
||||
case XE_SRIOV_PACKET_TYPE_GUC:
|
||||
ret = xe_gt_sriov_pf_migration_guc_restore(gt, vfid, data);
|
||||
break;
|
||||
case XE_SRIOV_PACKET_TYPE_VRAM:
|
||||
ret = xe_gt_sriov_pf_migration_vram_restore(gt, vfid, data);
|
||||
break;
|
||||
default:
|
||||
xe_gt_sriov_notice(gt, "Skipping VF%u unknown data type: %d\n",
|
||||
vfid, data->hdr.type);
|
||||
break;
|
||||
}
|
||||
|
||||
xe_sriov_packet_free(data);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool pf_handle_vf_restore(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA))
|
||||
return false;
|
||||
|
||||
if (xe_gt_sriov_pf_migration_ring_empty(gt, vfid)) {
|
||||
if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_DATA_DONE))
|
||||
pf_enter_vf_restored(gt, vfid);
|
||||
else
|
||||
pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
ret = pf_handle_vf_restore_data(gt, vfid);
|
||||
if (ret)
|
||||
pf_enter_vf_restore_failed(gt, vfid);
|
||||
else
|
||||
pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void pf_exit_vf_restore_wait_data(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA))
|
||||
return;
|
||||
|
||||
pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA);
|
||||
pf_queue_vf(gt, vfid);
|
||||
}
|
||||
|
||||
static bool pf_enter_vf_restore_wip(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) {
|
||||
pf_enter_vf_wip(gt, vfid);
|
||||
pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA);
|
||||
pf_queue_vf(gt, vfid);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_gt_sriov_pf_control_check_restore_failed() - Check if restore processing has failed.
|
||||
* @gt: the &xe_gt
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* This function is for PF only.
|
||||
*
|
||||
* Return: true if restore processing failed, false otherwise.
|
||||
*/
|
||||
bool xe_gt_sriov_pf_control_check_restore_failed(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
return pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_gt_sriov_pf_control_restore_data_done() - Indicate the end of VF migration data stream.
|
||||
* @gt: the &xe_gt
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* This function is for PF only.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_gt_sriov_pf_control_restore_data_done(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_DATA_DONE)) {
|
||||
pf_enter_vf_state_machine_bug(gt, vfid);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return xe_gt_sriov_pf_control_process_restore_data(gt, vfid);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_gt_sriov_pf_control_process_restore_data() - Queue VF restore migration data processing.
|
||||
* @gt: the &xe_gt
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* This function is for PF only.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_gt_sriov_pf_control_process_restore_data(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (!pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED)) {
|
||||
xe_gt_sriov_pf_migration_ring_free(gt, vfid);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
pf_exit_vf_restore_wait_data(gt, vfid);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_gt_sriov_pf_control_trigger restore_vf() - Start an SR-IOV VF migration data restore sequence.
|
||||
* @gt: the &xe_gt
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* This function is for PF only.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_gt_sriov_pf_control_trigger_restore_vf(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) {
|
||||
xe_gt_sriov_dbg(gt, "VF%u is stopped!\n", vfid);
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) {
|
||||
xe_gt_sriov_dbg(gt, "VF%u is not paused!\n", vfid);
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) {
|
||||
xe_gt_sriov_dbg(gt, "VF%u save is in progress!\n", vfid);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (!pf_enter_vf_restore_wip(gt, vfid)) {
|
||||
xe_gt_sriov_dbg(gt, "VF%u restore already in progress!\n", vfid);
|
||||
return -EALREADY;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pf_wait_vf_restore_done(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_RESTORE_WIP);
|
||||
int err;
|
||||
|
||||
err = pf_wait_vf_wip_done(gt, vfid, timeout);
|
||||
if (err) {
|
||||
xe_gt_sriov_notice(gt, "VF%u RESTORE didn't finish in %u ms (%pe)\n",
|
||||
vfid, jiffies_to_msecs(timeout), ERR_PTR(err));
|
||||
return err;
|
||||
}
|
||||
|
||||
if (!pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED))
|
||||
return -EIO;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_gt_sriov_pf_control_finish_restore_vf() - Complete a VF migration data restore sequence.
|
||||
* @gt: the &xe_gt
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* This function is for PF only.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_gt_sriov_pf_control_finish_restore_vf(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = pf_wait_vf_restore_done(gt, vfid);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (!pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED)) {
|
||||
pf_enter_vf_mismatch(gt, vfid);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* DOC: The VF STOP state machine
|
||||
*
|
||||
@@ -817,6 +1378,8 @@ static void pf_enter_vf_stopped(struct xe_gt *gt, unsigned int vfid)
|
||||
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED);
|
||||
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED);
|
||||
pf_exit_vf_mismatch(gt, vfid);
|
||||
pf_exit_vf_wip(gt, vfid);
|
||||
}
|
||||
@@ -1460,7 +2023,22 @@ static bool pf_process_vf_state_machine(struct xe_gt *gt, unsigned int vfid)
|
||||
if (pf_exit_vf_pause_guc_done(gt, vfid))
|
||||
return true;
|
||||
|
||||
if (pf_exit_vf_pause_save_guc(gt, vfid))
|
||||
if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA)) {
|
||||
xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid,
|
||||
control_bit_to_string(XE_GT_SRIOV_STATE_SAVE_WAIT_DATA));
|
||||
return false;
|
||||
}
|
||||
|
||||
if (pf_handle_vf_save(gt, vfid))
|
||||
return true;
|
||||
|
||||
if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA)) {
|
||||
xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid,
|
||||
control_bit_to_string(XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA));
|
||||
return false;
|
||||
}
|
||||
|
||||
if (pf_handle_vf_restore(gt, vfid))
|
||||
return true;
|
||||
|
||||
if (pf_exit_vf_resume_send_resume(gt, vfid))
|
||||
|
||||
@@ -16,6 +16,16 @@ void xe_gt_sriov_pf_control_restart(struct xe_gt *gt);

int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid);
bool xe_gt_sriov_pf_control_check_save_data_done(struct xe_gt *gt, unsigned int vfid);
bool xe_gt_sriov_pf_control_check_save_failed(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_process_save_data(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_trigger_save_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_finish_save_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_restore_data_done(struct xe_gt *gt, unsigned int vfid);
bool xe_gt_sriov_pf_control_check_restore_failed(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_process_restore_data(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_trigger_restore_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_finish_restore_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_sync_flr(struct xe_gt *gt, unsigned int vfid, bool sync);
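A rough sketch of how the new save entry points above are meant to chain together, consistent with the SAVE state machine documented earlier in this pull (trigger starts SAVE_WIP, process_save_data lets the PF worker produce more packets once ring space is available, and finish completes the sequence). The data-ring consumer is only indicated by a comment here; the real consumer is the PF migration/debugfs code added elsewhere in this series:

	int err = xe_gt_sriov_pf_control_trigger_save_vf(gt, vfid);

	if (err)
		return err;

	while (!xe_gt_sriov_pf_control_check_save_data_done(gt, vfid)) {
		if (xe_gt_sriov_pf_control_check_save_failed(gt, vfid))
			return -EIO;

		/* ... consume packets from the migration data ring here ... */

		/* let the PF worker produce more data now that there is ring space */
		err = xe_gt_sriov_pf_control_process_save_data(gt, vfid);
		if (err)
			return err;
	}

	return xe_gt_sriov_pf_control_finish_save_vf(gt, vfid);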
@@ -28,9 +28,20 @@
 * @XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE: indicates that the PF is about to send a PAUSE command.
 * @XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC: indicates that the PF awaits for a response from the GuC.
 * @XE_GT_SRIOV_STATE_PAUSE_GUC_DONE: indicates that the PF has received a response from the GuC.
 * @XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC: indicates that the PF needs to save the VF GuC state.
 * @XE_GT_SRIOV_STATE_PAUSE_FAILED: indicates that a VF pause operation has failed.
 * @XE_GT_SRIOV_STATE_PAUSED: indicates that the VF is paused.
 * @XE_GT_SRIOV_STATE_SAVE_WIP: indicates that VF save operation is in progress.
 * @XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA: indicates that VF migration data is being produced.
 * @XE_GT_SRIOV_STATE_SAVE_WAIT_DATA: indicates that PF awaits for space in migration data ring.
 * @XE_GT_SRIOV_STATE_SAVE_DATA_DONE: indicates that all migration data was produced by Xe.
 * @XE_GT_SRIOV_STATE_SAVE_FAILED: indicates that VF save operation has failed.
 * @XE_GT_SRIOV_STATE_SAVED: indicates that VF data is saved.
 * @XE_GT_SRIOV_STATE_RESTORE_WIP: indicates that VF restore operation is in progress.
 * @XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA: indicates that VF migration data is being consumed.
 * @XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA: indicates that PF awaits for data in migration data ring.
 * @XE_GT_SRIOV_STATE_RESTORE_DATA_DONE: indicates that all migration data was produced by the user.
 * @XE_GT_SRIOV_STATE_RESTORE_FAILED: indicates that VF restore operation has failed.
 * @XE_GT_SRIOV_STATE_RESTORED: indicates that VF data is restored.
 * @XE_GT_SRIOV_STATE_RESUME_WIP: indicates the a VF resume operation is in progress.
 * @XE_GT_SRIOV_STATE_RESUME_SEND_RESUME: indicates that the PF is about to send RESUME command.
 * @XE_GT_SRIOV_STATE_RESUME_FAILED: indicates that a VF resume operation has failed.
@@ -59,10 +70,23 @@ enum xe_gt_sriov_control_bits {
|
||||
XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE,
|
||||
XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC,
|
||||
XE_GT_SRIOV_STATE_PAUSE_GUC_DONE,
|
||||
XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC,
|
||||
XE_GT_SRIOV_STATE_PAUSE_FAILED,
|
||||
XE_GT_SRIOV_STATE_PAUSED,
|
||||
|
||||
XE_GT_SRIOV_STATE_SAVE_WIP,
|
||||
XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA,
|
||||
XE_GT_SRIOV_STATE_SAVE_WAIT_DATA,
|
||||
XE_GT_SRIOV_STATE_SAVE_DATA_DONE,
|
||||
XE_GT_SRIOV_STATE_SAVE_FAILED,
|
||||
XE_GT_SRIOV_STATE_SAVED,
|
||||
|
||||
XE_GT_SRIOV_STATE_RESTORE_WIP,
|
||||
XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA,
|
||||
XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA,
|
||||
XE_GT_SRIOV_STATE_RESTORE_DATA_DONE,
|
||||
XE_GT_SRIOV_STATE_RESTORE_FAILED,
|
||||
XE_GT_SRIOV_STATE_RESTORED,
|
||||
|
||||
XE_GT_SRIOV_STATE_RESUME_WIP,
|
||||
XE_GT_SRIOV_STATE_RESUME_SEND_RESUME,
|
||||
XE_GT_SRIOV_STATE_RESUME_FAILED,
|
||||
@@ -73,9 +97,11 @@ enum xe_gt_sriov_control_bits {
|
||||
XE_GT_SRIOV_STATE_STOP_FAILED,
|
||||
XE_GT_SRIOV_STATE_STOPPED,
|
||||
|
||||
XE_GT_SRIOV_STATE_MISMATCH = BITS_PER_LONG - 1,
|
||||
XE_GT_SRIOV_STATE_MISMATCH, /* always keep as last */
|
||||
};
|
||||
|
||||
#define XE_GT_SRIOV_NUM_STATES (XE_GT_SRIOV_STATE_MISMATCH + 1)
|
||||
|
||||
/**
|
||||
* struct xe_gt_sriov_control_state - GT-level per-VF control state.
|
||||
*
|
||||
@@ -83,7 +109,7 @@ enum xe_gt_sriov_control_bits {
|
||||
*/
|
||||
struct xe_gt_sriov_control_state {
|
||||
/** @state: VF state bits */
|
||||
unsigned long state;
|
||||
DECLARE_BITMAP(state, XE_GT_SRIOV_NUM_STATES);
|
||||
|
||||
/** @done: completion of async operations */
|
||||
struct completion done;
|
||||
|
||||
@@ -327,9 +327,6 @@ static const struct {
|
||||
{ "stop", xe_gt_sriov_pf_control_stop_vf },
|
||||
{ "pause", xe_gt_sriov_pf_control_pause_vf },
|
||||
{ "resume", xe_gt_sriov_pf_control_resume_vf },
|
||||
#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
|
||||
{ "restore!", xe_gt_sriov_pf_migration_restore_guc_state },
|
||||
#endif
|
||||
};
|
||||
|
||||
static ssize_t control_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
|
||||
@@ -393,47 +390,6 @@ static const struct file_operations control_ops = {
|
||||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
/*
|
||||
* /sys/kernel/debug/dri/BDF/
|
||||
* ├── sriov
|
||||
* : ├── vf1
|
||||
* : ├── tile0
|
||||
* : ├── gt0
|
||||
* : ├── guc_state
|
||||
*/
|
||||
|
||||
static ssize_t guc_state_read(struct file *file, char __user *buf,
|
||||
size_t count, loff_t *pos)
|
||||
{
|
||||
struct dentry *dent = file_dentry(file);
|
||||
struct dentry *parent = dent->d_parent;
|
||||
struct xe_gt *gt = extract_gt(parent);
|
||||
unsigned int vfid = extract_vfid(parent);
|
||||
|
||||
return xe_gt_sriov_pf_migration_read_guc_state(gt, vfid, buf, count, pos);
|
||||
}
|
||||
|
||||
static ssize_t guc_state_write(struct file *file, const char __user *buf,
|
||||
size_t count, loff_t *pos)
|
||||
{
|
||||
struct dentry *dent = file_dentry(file);
|
||||
struct dentry *parent = dent->d_parent;
|
||||
struct xe_gt *gt = extract_gt(parent);
|
||||
unsigned int vfid = extract_vfid(parent);
|
||||
|
||||
if (*pos)
|
||||
return -EINVAL;
|
||||
|
||||
return xe_gt_sriov_pf_migration_write_guc_state(gt, vfid, buf, count);
|
||||
}
|
||||
|
||||
static const struct file_operations guc_state_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.read = guc_state_read,
|
||||
.write = guc_state_write,
|
||||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
/*
|
||||
* /sys/kernel/debug/dri/BDF/
|
||||
* ├── sriov
|
||||
@@ -568,9 +524,6 @@ static void pf_populate_gt(struct xe_gt *gt, struct dentry *dent, unsigned int v
|
||||
|
||||
/* for testing/debugging purposes only! */
|
||||
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
|
||||
debugfs_create_file("guc_state",
|
||||
IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400,
|
||||
dent, NULL, &guc_state_ops);
|
||||
debugfs_create_file("config_blob",
|
||||
IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400,
|
||||
dent, NULL, &config_blob_ops);
|
||||
|
||||
File diff suppressed because it is too large
@@ -9,16 +9,46 @@
|
||||
#include <linux/types.h>
|
||||
|
||||
struct xe_gt;
|
||||
struct xe_sriov_packet;
|
||||
enum xe_sriov_packet_type;
|
||||
|
||||
/* TODO: get this information by querying GuC in the future */
|
||||
#define XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE SZ_8M
|
||||
|
||||
int xe_gt_sriov_pf_migration_init(struct xe_gt *gt);
|
||||
int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid);
|
||||
int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid);
|
||||
int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid);
|
||||
int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid,
|
||||
struct xe_sriov_packet *data);
|
||||
int xe_gt_sriov_pf_migration_ggtt_save(struct xe_gt *gt, unsigned int vfid);
|
||||
int xe_gt_sriov_pf_migration_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
|
||||
struct xe_sriov_packet *data);
|
||||
int xe_gt_sriov_pf_migration_mmio_save(struct xe_gt *gt, unsigned int vfid);
|
||||
int xe_gt_sriov_pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid,
|
||||
struct xe_sriov_packet *data);
|
||||
int xe_gt_sriov_pf_migration_vram_save(struct xe_gt *gt, unsigned int vfid);
|
||||
int xe_gt_sriov_pf_migration_vram_restore(struct xe_gt *gt, unsigned int vfid,
|
||||
struct xe_sriov_packet *data);
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
|
||||
char __user *buf, size_t count, loff_t *pos);
|
||||
ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
|
||||
const char __user *buf, size_t count);
|
||||
#endif
|
||||
ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid);
|
||||
|
||||
bool xe_gt_sriov_pf_migration_ring_empty(struct xe_gt *gt, unsigned int vfid);
|
||||
bool xe_gt_sriov_pf_migration_ring_full(struct xe_gt *gt, unsigned int vfid);
|
||||
void xe_gt_sriov_pf_migration_ring_free(struct xe_gt *gt, unsigned int vfid);
|
||||
|
||||
void xe_gt_sriov_pf_migration_save_init(struct xe_gt *gt, unsigned int vfid);
|
||||
bool xe_gt_sriov_pf_migration_save_data_pending(struct xe_gt *gt, unsigned int vfid,
|
||||
enum xe_sriov_packet_type type);
|
||||
void xe_gt_sriov_pf_migration_save_data_complete(struct xe_gt *gt, unsigned int vfid,
|
||||
enum xe_sriov_packet_type type);
|
||||
|
||||
int xe_gt_sriov_pf_migration_save_produce(struct xe_gt *gt, unsigned int vfid,
|
||||
struct xe_sriov_packet *data);
|
||||
struct xe_sriov_packet *
|
||||
xe_gt_sriov_pf_migration_restore_consume(struct xe_gt *gt, unsigned int vfid);
|
||||
|
||||
int xe_gt_sriov_pf_migration_restore_produce(struct xe_gt *gt, unsigned int vfid,
|
||||
struct xe_sriov_packet *data);
|
||||
struct xe_sriov_packet *
|
||||
xe_gt_sriov_pf_migration_save_consume(struct xe_gt *gt, unsigned int vfid);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -6,35 +6,23 @@
|
||||
#ifndef _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
|
||||
#define _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
|
||||
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/ptr_ring.h>
|
||||
|
||||
/**
|
||||
* struct xe_gt_sriov_state_snapshot - GT-level per-VF state snapshot data.
|
||||
* struct xe_gt_sriov_migration_data - GT-level per-VF migration data.
|
||||
*
|
||||
* Used by the PF driver to maintain per-VF migration data.
|
||||
*/
|
||||
struct xe_gt_sriov_state_snapshot {
|
||||
/** @guc: GuC VF state snapshot */
|
||||
struct xe_gt_sriov_migration_data {
|
||||
/** @ring: queue containing VF save / restore migration data */
|
||||
struct ptr_ring ring;
|
||||
/** @save: structure for currently processed save migration data */
|
||||
struct {
|
||||
/** @guc.buff: buffer with the VF state */
|
||||
u32 *buff;
|
||||
/** @guc.size: size of the buffer (must be dwords aligned) */
|
||||
u32 size;
|
||||
} guc;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct xe_gt_sriov_pf_migration - GT-level data.
|
||||
*
|
||||
* Used by the PF driver to maintain non-VF specific per-GT data.
|
||||
*/
|
||||
struct xe_gt_sriov_pf_migration {
|
||||
/** @supported: indicates whether the feature is supported */
|
||||
bool supported;
|
||||
|
||||
/** @snapshot_lock: protects all VFs snapshots */
|
||||
struct mutex snapshot_lock;
|
||||
/** @save.data_remaining: bitmap of migration types that need to be saved */
|
||||
unsigned long data_remaining;
|
||||
/** @save.vram_offset: last saved offset within VRAM, used for chunked VRAM save */
|
||||
loff_t vram_offset;
|
||||
} save;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
@@ -99,11 +99,30 @@ static const struct xe_reg ver_3000_runtime_regs[] = {
|
||||
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
|
||||
};
|
||||
|
||||
static const struct xe_reg ver_35_runtime_regs[] = {
|
||||
RPM_CONFIG0, /* _MMIO(0x0d00) */
|
||||
XEHP_FUSE4, /* _MMIO(0x9114) */
|
||||
MIRROR_FUSE3, /* _MMIO(0x9118) */
|
||||
MIRROR_L3BANK_ENABLE, /* _MMIO(0x9130) */
|
||||
XELP_EU_ENABLE, /* _MMIO(0x9134) */
|
||||
XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
|
||||
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
|
||||
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
|
||||
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
|
||||
XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */
|
||||
XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */
|
||||
XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */
|
||||
SERVICE_COPY_ENABLE, /* _MMIO(0x9170) */
|
||||
};
|
||||
|
||||
static const struct xe_reg *pick_runtime_regs(struct xe_device *xe, unsigned int *count)
|
||||
{
|
||||
const struct xe_reg *regs;
|
||||
|
||||
if (GRAPHICS_VERx100(xe) >= 3000) {
|
||||
if (GRAPHICS_VER(xe) >= 35) {
|
||||
*count = ARRAY_SIZE(ver_35_runtime_regs);
|
||||
regs = ver_35_runtime_regs;
|
||||
} else if (GRAPHICS_VERx100(xe) >= 3000) {
|
||||
*count = ARRAY_SIZE(ver_3000_runtime_regs);
|
||||
regs = ver_3000_runtime_regs;
|
||||
} else if (GRAPHICS_VERx100(xe) >= 2000) {
|
||||
|
||||
@@ -31,8 +31,8 @@ struct xe_gt_sriov_metadata {
|
||||
/** @version: negotiated VF/PF ABI version */
|
||||
struct xe_gt_sriov_pf_service_version version;
|
||||
|
||||
/** @snapshot: snapshot of the VF state data */
|
||||
struct xe_gt_sriov_state_snapshot snapshot;
|
||||
/** @migration: per-VF migration data. */
|
||||
struct xe_gt_sriov_migration_data migration;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -58,7 +58,6 @@ struct xe_gt_sriov_pf {
|
||||
struct xe_gt_sriov_pf_service service;
|
||||
struct xe_gt_sriov_pf_control control;
|
||||
struct xe_gt_sriov_pf_policy policy;
|
||||
struct xe_gt_sriov_pf_migration migration;
|
||||
struct xe_gt_sriov_spare_config spare;
|
||||
struct xe_gt_sriov_metadata *vfs;
|
||||
};
|
||||
|
||||
@@ -22,9 +22,15 @@
 * Their availability depend on the platform and some may not be visible if that
 * reason is not available.
 *
 * The ``reasons`` attribute can be used by sysadmin to monitor all possible
 * reasons for throttling and report them. It's preferred over monitoring
 * ``status`` and then reading the reason from individual attributes since that
 * is racy. If there's no throttling happening, "none" is returned.
 *
 * The following attributes are available on Crescent Island platform:
 *
 * - ``status``: Overall throttle status
 * - ``status``: Overall throttle status (0: no throttling, 1: throttling)
 * - ``reasons``: Array of reasons causing throttling separated by space
 * - ``reason_pl1``: package PL1
 * - ``reason_pl2``: package PL2
 * - ``reason_pl4``: package PL4
@@ -43,7 +49,8 @@
 *
 * Other platforms support the following reasons:
 *
 * - ``status``: Overall status
 * - ``status``: Overall throttle status (0: no throttling, 1: throttling)
 * - ``reasons``: Array of reasons causing throttling separated by space
 * - ``reason_pl1``: package PL1
 * - ``reason_pl2``: package PL2
 * - ``reason_pl4``: package PL4, Iccmax etc.
@@ -111,12 +118,57 @@ static ssize_t reason_show(struct kobject *kobj,
	return sysfs_emit(buff, "%u\n", is_throttled_by(gt, ta->mask));
}

static const struct attribute_group *get_platform_throttle_group(struct xe_device *xe);

static ssize_t reasons_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buff)
{
	struct xe_gt *gt = throttle_to_gt(kobj);
	struct xe_device *xe = gt_to_xe(gt);
	const struct attribute_group *group;
	struct attribute **pother;
	ssize_t ret = 0;
	u32 reasons;

	reasons = xe_gt_throttle_get_limit_reasons(gt);
	if (!reasons)
		goto ret_none;

	group = get_platform_throttle_group(xe);
	for (pother = group->attrs; *pother; pother++) {
		struct kobj_attribute *kattr = container_of(*pother, struct kobj_attribute, attr);
		struct throttle_attribute *other_ta = kobj_attribute_to_throttle(kattr);

		if (other_ta->mask != U32_MAX && reasons & other_ta->mask)
			ret += sysfs_emit_at(buff, ret, "%s ", (*pother)->name);
	}

	if (drm_WARN_ONCE(&xe->drm, !ret, "Unknown reason: %#x\n", reasons))
		goto ret_none;

	/* Drop extra space from last iteration above */
	ret--;
	ret += sysfs_emit_at(buff, ret, "\n");

	return ret;

ret_none:
	return sysfs_emit(buff, "none\n");
}
|
||||
|
||||
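Seen from userspace, the point of the combined attribute is that a single read of ``reasons`` returns one consistent snapshot instead of several racy reads of the per-reason files. A small illustrative snippet that polls it; the exact sysfs path below is an assumption and depends on the card, tile and GT indices:

#include <stdio.h>

int main(void)
{
	/* hypothetical path; adjust cardN/tileN/gtN to the device being monitored */
	FILE *f = fopen("/sys/class/drm/card0/device/tile0/gt0/freq0/throttle/reasons", "r");
	char line[256];

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		printf("throttle reasons: %s", line);	/* prints "none" when not throttled */
	fclose(f);
	return 0;
}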
#define THROTTLE_ATTR_RO(name, _mask) \
|
||||
struct throttle_attribute attr_##name = { \
|
||||
.attr = __ATTR(name, 0444, reason_show, NULL), \
|
||||
.mask = _mask, \
|
||||
}
|
||||
|
||||
#define THROTTLE_ATTR_RO_FUNC(name, _mask, _show) \
|
||||
struct throttle_attribute attr_##name = { \
|
||||
.attr = __ATTR(name, 0444, _show, NULL), \
|
||||
.mask = _mask, \
|
||||
}
|
||||
|
||||
static THROTTLE_ATTR_RO_FUNC(reasons, 0, reasons_show);
|
||||
static THROTTLE_ATTR_RO(status, U32_MAX);
|
||||
static THROTTLE_ATTR_RO(reason_pl1, POWER_LIMIT_1_MASK);
|
||||
static THROTTLE_ATTR_RO(reason_pl2, POWER_LIMIT_2_MASK);
|
||||
@@ -128,6 +180,7 @@ static THROTTLE_ATTR_RO(reason_vr_thermalert, VR_THERMALERT_MASK);
|
||||
static THROTTLE_ATTR_RO(reason_vr_tdc, VR_TDC_MASK);
|
||||
|
||||
static struct attribute *throttle_attrs[] = {
|
||||
&attr_reasons.attr.attr,
|
||||
&attr_status.attr.attr,
|
||||
&attr_reason_pl1.attr.attr,
|
||||
&attr_reason_pl2.attr.attr,
|
||||
@@ -153,6 +206,7 @@ static THROTTLE_ATTR_RO(reason_psys_crit, PSYS_CRIT_MASK);
|
||||
|
||||
static struct attribute *cri_throttle_attrs[] = {
|
||||
/* Common */
|
||||
&attr_reasons.attr.attr,
|
||||
&attr_status.attr.attr,
|
||||
&attr_reason_pl1.attr.attr,
|
||||
&attr_reason_pl2.attr.attr,
|
||||
|
||||
119
drivers/gpu/drm/xe/xe_guard.h
Normal file
@@ -0,0 +1,119 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2025 Intel Corporation
 */

#ifndef _XE_GUARD_H_
#define _XE_GUARD_H_

#include <linux/spinlock.h>

/**
 * struct xe_guard - Simple logic to protect a feature.
 *
 * Implements simple semaphore-like logic that can be used to lockdown the
 * feature unless it is already in use. Allows enabling of the otherwise
 * incompatible features, where we can't follow the strict owner semantics
 * required by the &rw_semaphore.
 *
 * NOTE! It shouldn't be used to protect a data, use &rw_semaphore instead.
 */
struct xe_guard {
	/**
	 * @counter: implements simple exclusive/lockdown logic:
	 *           if == 0 then guard/feature is idle/not in use,
	 *           if < 0 then feature is active and can't be locked-down,
	 *           if > 0 then feature is lockded-down and can't be activated.
	 */
	int counter;

	/** @name: the name of the guard (useful for debug) */
	const char *name;

	/** @owner: the info about the last owner of the guard (for debug) */
	void *owner;

	/** @lock: protects guard's data */
	spinlock_t lock;
};

/**
 * xe_guard_init() - Initialize the guard.
 * @guard: the &xe_guard to init
 * @name: name of the guard
 */
static inline void xe_guard_init(struct xe_guard *guard, const char *name)
{
	spin_lock_init(&guard->lock);
	guard->counter = 0;
	guard->name = name;
}

/**
 * xe_guard_arm() - Arm the guard for the exclusive/lockdown mode.
 * @guard: the &xe_guard to arm
 * @lockdown: arm for lockdown(true) or exclusive(false) mode
 * @who: optional owner info (for debug only)
 *
 * Multiple lockdown requests are allowed.
 * Only single exclusive access can be granted.
 * Will fail if the guard is already in exclusive mode.
 * On success, must call the xe_guard_disarm() to release.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static inline int xe_guard_arm(struct xe_guard *guard, bool lockdown, void *who)
{
	guard(spinlock)(&guard->lock);

	if (lockdown) {
		if (guard->counter < 0)
			return -EBUSY;
		guard->counter++;
	} else {
		if (guard->counter > 0)
			return -EPERM;
		if (guard->counter < 0)
			return -EUSERS;
		guard->counter--;
	}

	guard->owner = who;
	return 0;
}

/**
 * xe_guard_disarm() - Disarm the guard from exclusive/lockdown mode.
 * @guard: the &xe_guard to disarm
 * @lockdown: disarm from lockdown(true) or exclusive(false) mode
 *
 * Return: true if successfully disarmed or false in case of mismatch.
 */
static inline bool xe_guard_disarm(struct xe_guard *guard, bool lockdown)
{
	guard(spinlock)(&guard->lock);

	if (lockdown) {
		if (guard->counter <= 0)
			return false;
		guard->counter--;
	} else {
		if (guard->counter != -1)
			return false;
		guard->counter++;
	}
	return true;
}

/**
 * xe_guard_mode_str() - Convert guard mode into a string.
 * @lockdown: flag used to select lockdown or exclusive mode
 *
 * Return: "lockdown" or "exclusive" string.
 */
static inline const char *xe_guard_mode_str(bool lockdown)
{
	return lockdown ? "lockdown" : "exclusive";
}

#endif
|
||||
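The guard above is a tri-state counter rather than a lock: a negative count means the feature is in exclusive use, a positive count means it is locked down. A minimal usage sketch for the exclusive side, assuming an illustrative feature and owner pointer (the names below are not taken from the driver):

/* initialized once, e.g. at probe time: xe_guard_init(&example_guard, "example"); */
static struct xe_guard example_guard;

static int example_enable_feature(void *owner)
{
	int err;

	/* exclusive mode: -EPERM if locked down, -EUSERS if already in exclusive use */
	err = xe_guard_arm(&example_guard, false, owner);
	if (err)
		return err;

	/* ... feature is active; a lockdown attempt now fails with -EBUSY ... */

	if (!xe_guard_disarm(&example_guard, false))
		pr_warn("example: %s disarm mismatch\n", xe_guard_mode_str(false));

	return 0;
}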
@@ -24,6 +24,7 @@
|
||||
#include "xe_gt_printk.h"
|
||||
#include "xe_gt_sriov_vf.h"
|
||||
#include "xe_gt_throttle.h"
|
||||
#include "xe_gt_sriov_pf_migration.h"
|
||||
#include "xe_guc_ads.h"
|
||||
#include "xe_guc_buf.h"
|
||||
#include "xe_guc_capture.h"
|
||||
@@ -40,6 +41,7 @@
|
||||
#include "xe_mmio.h"
|
||||
#include "xe_platform_types.h"
|
||||
#include "xe_sriov.h"
|
||||
#include "xe_sriov_pf_migration.h"
|
||||
#include "xe_uc.h"
|
||||
#include "xe_uc_fw.h"
|
||||
#include "xe_wa.h"
|
||||
@@ -821,6 +823,14 @@ static int vf_guc_init_post_hwconfig(struct xe_guc *guc)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u32 guc_additional_cache_size(struct xe_device *xe)
|
||||
{
|
||||
if (IS_SRIOV_PF(xe) && xe_sriov_pf_migration_supported(xe))
|
||||
return XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE;
|
||||
else
|
||||
return 0; /* Fallback to default size */
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_guc_init_post_hwconfig - initialize GuC post hwconfig load
|
||||
* @guc: The GuC object
|
||||
@@ -860,7 +870,8 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = xe_guc_buf_cache_init(&guc->buf);
|
||||
ret = xe_guc_buf_cache_init_with_size(&guc->buf,
|
||||
guc_additional_cache_size(guc_to_xe(guc)));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
||||
@@ -13,6 +13,8 @@
|
||||
#include "xe_guc_buf.h"
|
||||
#include "xe_sa.h"
|
||||
|
||||
#define XE_GUC_BUF_CACHE_DEFAULT_SIZE SZ_8K
|
||||
|
||||
static struct xe_guc *cache_to_guc(struct xe_guc_buf_cache *cache)
|
||||
{
|
||||
return container_of(cache, struct xe_guc, buf);
|
||||
@@ -23,21 +25,12 @@ static struct xe_gt *cache_to_gt(struct xe_guc_buf_cache *cache)
|
||||
return guc_to_gt(cache_to_guc(cache));
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache.
|
||||
* @cache: the &xe_guc_buf_cache to initialize
|
||||
*
|
||||
* The Buffer Cache allows to obtain a reusable buffer that can be used to pass
|
||||
* indirect H2G data to GuC without a need to create a ad-hoc allocation.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
|
||||
static int guc_buf_cache_init(struct xe_guc_buf_cache *cache, u32 size)
|
||||
{
|
||||
struct xe_gt *gt = cache_to_gt(cache);
|
||||
struct xe_sa_manager *sam;
|
||||
|
||||
sam = __xe_sa_bo_manager_init(gt_to_tile(gt), SZ_8K, 0, sizeof(u32));
|
||||
sam = __xe_sa_bo_manager_init(gt_to_tile(gt), size, 0, sizeof(u32));
|
||||
if (IS_ERR(sam))
|
||||
return PTR_ERR(sam);
|
||||
cache->sam = sam;
|
||||
@@ -48,6 +41,35 @@ int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache.
|
||||
* @cache: the &xe_guc_buf_cache to initialize
|
||||
*
|
||||
* The Buffer Cache allows to obtain a reusable buffer that can be used to pass
|
||||
* data to GuC or read data from GuC without a need to create a ad-hoc allocation.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
|
||||
{
|
||||
return guc_buf_cache_init(cache, XE_GUC_BUF_CACHE_DEFAULT_SIZE);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_guc_buf_cache_init_with_size() - Initialize the GuC Buffer Cache.
|
||||
* @cache: the &xe_guc_buf_cache to initialize
|
||||
* @size: size in bytes
|
||||
*
|
||||
* Like xe_guc_buf_cache_init(), except it allows the caller to make the cache
|
||||
* buffer larger, allowing to accommodate larger objects.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_guc_buf_cache_init_with_size(struct xe_guc_buf_cache *cache, u32 size)
|
||||
{
|
||||
return guc_buf_cache_init(cache, max(XE_GUC_BUF_CACHE_DEFAULT_SIZE, size));
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_guc_buf_cache_dwords() - Number of dwords the GuC Buffer Cache supports.
|
||||
* @cache: the &xe_guc_buf_cache to query
|
||||
@@ -115,6 +137,19 @@ void xe_guc_buf_release(const struct xe_guc_buf buf)
|
||||
xe_sa_bo_free(buf.sa, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_guc_buf_sync_read() - Copy the data from the GPU memory to the sub-allocation.
|
||||
* @buf: the &xe_guc_buf to sync
|
||||
*
|
||||
* Return: a CPU pointer of the sub-allocation.
|
||||
*/
|
||||
void *xe_guc_buf_sync_read(const struct xe_guc_buf buf)
|
||||
{
|
||||
xe_sa_bo_sync_read(buf.sa);
|
||||
|
||||
return xe_sa_bo_cpu_addr(buf.sa);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_guc_buf_flush() - Copy the data from the sub-allocation to the GPU memory.
|
||||
* @buf: the &xe_guc_buf to flush
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
#include "xe_guc_buf_types.h"
|
||||
|
||||
int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache);
|
||||
int xe_guc_buf_cache_init_with_size(struct xe_guc_buf_cache *cache, u32 size);
|
||||
u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache);
|
||||
struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords);
|
||||
struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache,
|
||||
@@ -30,6 +31,7 @@ static inline bool xe_guc_buf_is_valid(const struct xe_guc_buf buf)
|
||||
}
|
||||
|
||||
void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf);
|
||||
void *xe_guc_buf_sync_read(const struct xe_guc_buf buf);
|
||||
u64 xe_guc_buf_flush(const struct xe_guc_buf buf);
|
||||
u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf);
|
||||
u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size);
|
||||
|
||||
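A short sketch of the read-back flow these helpers enable: reserve space from the (possibly enlarged) cache, let the GuC write to the buffer's GPU address, then pull the data back with xe_guc_buf_sync_read(). The H2G action in the middle and the dword count are assumptions for illustration:

	struct xe_guc_buf buf;
	void *data;

	buf = xe_guc_buf_reserve(cache, num_dwords);
	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	/* ... issue an H2G action that makes the GuC write to xe_guc_buf_gpu_addr(buf) ... */

	/* copy from GPU memory into the CPU-visible sub-allocation, then parse it */
	data = xe_guc_buf_sync_read(buf);

	xe_guc_buf_release(buf);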
@@ -331,7 +331,7 @@ static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
|
||||
* Our goal is to have the admin choices respected.
|
||||
*/
|
||||
pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
|
||||
freq < pc->rpe_freq);
|
||||
freq < xe_guc_pc_get_rpe_freq(pc));
|
||||
|
||||
return pc_action_set_param(pc,
|
||||
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
|
||||
@@ -363,7 +363,7 @@ static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
|
||||
freq);
|
||||
}
|
||||
|
||||
static void mtl_update_rpa_value(struct xe_guc_pc *pc)
|
||||
static u32 mtl_get_rpa_freq(struct xe_guc_pc *pc)
|
||||
{
|
||||
struct xe_gt *gt = pc_to_gt(pc);
|
||||
u32 reg;
|
||||
@@ -373,10 +373,10 @@ static void mtl_update_rpa_value(struct xe_guc_pc *pc)
|
||||
else
|
||||
reg = xe_mmio_read32(>->mmio, MTL_GT_RPA_FREQUENCY);
|
||||
|
||||
pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
|
||||
return decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
|
||||
}
|
||||
|
||||
static void mtl_update_rpe_value(struct xe_guc_pc *pc)
|
||||
static u32 mtl_get_rpe_freq(struct xe_guc_pc *pc)
|
||||
{
|
||||
struct xe_gt *gt = pc_to_gt(pc);
|
||||
u32 reg;
|
||||
@@ -386,68 +386,56 @@ static void mtl_update_rpe_value(struct xe_guc_pc *pc)
|
||||
else
|
||||
reg = xe_mmio_read32(>->mmio, MTL_GT_RPE_FREQUENCY);
|
||||
|
||||
pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
|
||||
return decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
|
||||
}
|
||||
|
||||
static void tgl_update_rpa_value(struct xe_guc_pc *pc)
|
||||
static u32 pvc_get_rpa_freq(struct xe_guc_pc *pc)
|
||||
{
|
||||
struct xe_gt *gt = pc_to_gt(pc);
|
||||
struct xe_device *xe = gt_to_xe(gt);
|
||||
u32 reg;
|
||||
|
||||
/*
|
||||
* For PVC we still need to use fused RP0 as the approximation for RPa
|
||||
* For other platforms than PVC we get the resolved RPa directly from
|
||||
* PCODE at a different register
|
||||
*/
|
||||
if (xe->info.platform == XE_PVC) {
|
||||
reg = xe_mmio_read32(>->mmio, PVC_RP_STATE_CAP);
|
||||
pc->rpa_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
|
||||
} else {
|
||||
reg = xe_mmio_read32(>->mmio, FREQ_INFO_REC);
|
||||
pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
|
||||
}
|
||||
|
||||
struct xe_gt *gt = pc_to_gt(pc);
|
||||
u32 reg;
|
||||
|
||||
reg = xe_mmio_read32(>->mmio, PVC_RP_STATE_CAP);
|
||||
return REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
|
||||
}
|
||||
|
||||
static void tgl_update_rpe_value(struct xe_guc_pc *pc)
|
||||
static u32 tgl_get_rpa_freq(struct xe_guc_pc *pc)
|
||||
{
|
||||
struct xe_gt *gt = pc_to_gt(pc);
|
||||
u32 reg;
|
||||
|
||||
reg = xe_mmio_read32(>->mmio, FREQ_INFO_REC);
|
||||
return REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
|
||||
}
|
||||
|
||||
static u32 pvc_get_rpe_freq(struct xe_guc_pc *pc)
|
||||
{
|
||||
struct xe_gt *gt = pc_to_gt(pc);
|
||||
struct xe_device *xe = gt_to_xe(gt);
|
||||
u32 reg;
|
||||
|
||||
/*
|
||||
* For PVC we still need to use fused RP1 as the approximation for RPe
|
||||
* For other platforms than PVC we get the resolved RPe directly from
|
||||
* PCODE at a different register
|
||||
*/
|
||||
if (xe->info.platform == XE_PVC) {
|
||||
reg = xe_mmio_read32(>->mmio, PVC_RP_STATE_CAP);
|
||||
pc->rpe_freq = REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
|
||||
} else {
|
||||
reg = xe_mmio_read32(>->mmio, FREQ_INFO_REC);
|
||||
pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
|
||||
}
|
||||
reg = xe_mmio_read32(>->mmio, PVC_RP_STATE_CAP);
|
||||
return REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
|
||||
}
|
||||
|
||||
static void pc_update_rp_values(struct xe_guc_pc *pc)
|
||||
static u32 tgl_get_rpe_freq(struct xe_guc_pc *pc)
|
||||
{
|
||||
struct xe_gt *gt = pc_to_gt(pc);
|
||||
struct xe_device *xe = gt_to_xe(gt);
|
||||
|
||||
if (GRAPHICS_VERx100(xe) >= 1270) {
|
||||
mtl_update_rpa_value(pc);
|
||||
mtl_update_rpe_value(pc);
|
||||
} else {
|
||||
tgl_update_rpa_value(pc);
|
||||
tgl_update_rpe_value(pc);
|
||||
}
|
||||
u32 reg;
|
||||
|
||||
/*
|
||||
* RPe is decided at runtime by PCODE. In the rare case where that's
|
||||
* smaller than the fused min, we will trust the PCODE and use that
|
||||
* as our minimum one.
|
||||
* For other platforms than PVC, we get the resolved RPe directly from
|
||||
* PCODE at a different register
|
||||
*/
|
||||
pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
|
||||
reg = xe_mmio_read32(>->mmio, FREQ_INFO_REC);
|
||||
return REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -548,9 +536,15 @@ u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
|
||||
*/
|
||||
u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
|
||||
{
|
||||
pc_update_rp_values(pc);
|
||||
struct xe_gt *gt = pc_to_gt(pc);
|
||||
struct xe_device *xe = gt_to_xe(gt);
|
||||
|
||||
return pc->rpa_freq;
|
||||
if (GRAPHICS_VERx100(xe) == 1260)
|
||||
return pvc_get_rpa_freq(pc);
|
||||
else if (GRAPHICS_VERx100(xe) >= 1270)
|
||||
return mtl_get_rpa_freq(pc);
|
||||
else
|
||||
return tgl_get_rpa_freq(pc);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -561,9 +555,17 @@ u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
|
||||
*/
|
||||
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
|
||||
{
|
||||
pc_update_rp_values(pc);
|
||||
struct xe_device *xe = pc_to_xe(pc);
|
||||
u32 freq;
|
||||
|
||||
return pc->rpe_freq;
|
||||
if (GRAPHICS_VERx100(xe) == 1260)
|
||||
freq = pvc_get_rpe_freq(pc);
|
||||
else if (GRAPHICS_VERx100(xe) >= 1270)
|
||||
freq = mtl_get_rpe_freq(pc);
|
||||
else
|
||||
freq = tgl_get_rpe_freq(pc);
|
||||
|
||||
return freq;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1022,7 +1024,7 @@ static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
|
||||
/*
|
||||
* Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
|
||||
*/
|
||||
ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc)));
|
||||
ret = pc_set_min_freq(pc, min(xe_guc_pc_get_rpe_freq(pc), pc_max_freq_cap(pc)));
|
||||
if (!ret)
|
||||
ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));
|
||||
|
||||
@@ -1133,8 +1135,6 @@ static int pc_init_freqs(struct xe_guc_pc *pc)
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
pc_update_rp_values(pc);
|
||||
|
||||
pc_init_pcode_freq(pc);
|
||||
|
||||
/*
|
||||
@@ -1340,7 +1340,7 @@ static void xe_guc_pc_fini_hw(void *arg)
|
||||
XE_WARN_ON(xe_guc_pc_stop(pc));
|
||||
|
||||
/* Bind requested freq to mert_freq_cap before unload */
|
||||
pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq));
|
||||
pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), xe_guc_pc_get_rpe_freq(pc)));
|
||||
|
||||
xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref);
|
||||
}
|
||||
|
||||
@@ -19,10 +19,6 @@ struct xe_guc_pc {
|
||||
atomic_t flush_freq_limit;
|
||||
/** @rp0_freq: HW RP0 frequency - The Maximum one */
|
||||
u32 rp0_freq;
|
||||
/** @rpa_freq: HW RPa frequency - The Achievable one */
|
||||
u32 rpa_freq;
|
||||
/** @rpe_freq: HW RPe frequency - The Efficient one */
|
||||
u32 rpe_freq;
|
||||
/** @rpn_freq: HW RPN frequency - The Minimum one */
|
||||
u32 rpn_freq;
|
||||
/** @user_requested_min: Stash the minimum requested freq by user */
|
||||
|
||||
@@ -29,6 +29,7 @@
|
||||
#include "xe_lrc.h"
|
||||
#include "xe_map.h"
|
||||
#include "xe_mocs.h"
|
||||
#include "xe_printk.h"
|
||||
#include "xe_pt.h"
|
||||
#include "xe_res_cursor.h"
|
||||
#include "xe_sa.h"
|
||||
@@ -1210,6 +1211,128 @@ struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate)
|
||||
return migrate->q;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_migrate_vram_copy_chunk() - Copy a chunk of a VRAM buffer object.
|
||||
* @vram_bo: The VRAM buffer object.
|
||||
* @vram_offset: The VRAM offset.
|
||||
* @sysmem_bo: The sysmem buffer object.
|
||||
* @sysmem_offset: The sysmem offset.
|
||||
* @size: The size of VRAM chunk to copy.
|
||||
* @dir: The direction of the copy operation.
|
||||
*
|
||||
* Copies a portion of a buffer object between VRAM and system memory.
|
||||
* On Xe2 platforms that support flat CCS, VRAM data is decompressed when
|
||||
* copying to system memory.
|
||||
*
|
||||
* Return: Pointer to a dma_fence representing the last copy batch, or
|
||||
* an error pointer on failure. If there is a failure, any copy operation
|
||||
* started by the function call has been synced.
|
||||
*/
|
||||
struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
|
||||
struct xe_bo *sysmem_bo, u64 sysmem_offset,
|
||||
u64 size, enum xe_migrate_copy_dir dir)
|
||||
{
|
||||
struct xe_device *xe = xe_bo_device(vram_bo);
|
||||
struct xe_tile *tile = vram_bo->tile;
|
||||
struct xe_gt *gt = tile->primary_gt;
|
||||
struct xe_migrate *m = tile->migrate;
|
||||
struct dma_fence *fence = NULL;
|
||||
struct ttm_resource *vram = vram_bo->ttm.resource;
|
||||
struct ttm_resource *sysmem = sysmem_bo->ttm.resource;
|
||||
struct xe_res_cursor vram_it, sysmem_it;
|
||||
u64 vram_L0_ofs, sysmem_L0_ofs;
|
||||
u32 vram_L0_pt, sysmem_L0_pt;
|
||||
u64 vram_L0, sysmem_L0;
|
||||
bool to_sysmem = (dir == XE_MIGRATE_COPY_TO_SRAM);
|
||||
bool use_comp_pat = to_sysmem &&
|
||||
GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe);
|
||||
int pass = 0;
|
||||
int err;
|
||||
|
||||
xe_assert(xe, IS_ALIGNED(vram_offset | sysmem_offset | size, PAGE_SIZE));
|
||||
xe_assert(xe, xe_bo_is_vram(vram_bo));
|
||||
xe_assert(xe, !xe_bo_is_vram(sysmem_bo));
|
||||
xe_assert(xe, !range_overflows(vram_offset, size, (u64)vram_bo->ttm.base.size));
|
||||
xe_assert(xe, !range_overflows(sysmem_offset, size, (u64)sysmem_bo->ttm.base.size));
|
||||
|
||||
xe_res_first(vram, vram_offset, size, &vram_it);
|
||||
xe_res_first_sg(xe_bo_sg(sysmem_bo), sysmem_offset, size, &sysmem_it);
|
||||
|
||||
while (size) {
|
||||
u32 pte_flags = PTE_UPDATE_FLAG_IS_VRAM;
|
||||
u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
|
||||
struct xe_sched_job *job;
|
||||
struct xe_bb *bb;
|
||||
u32 update_idx;
|
||||
bool usm = xe->info.has_usm;
|
||||
u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
|
||||
|
||||
sysmem_L0 = xe_migrate_res_sizes(m, &sysmem_it);
|
||||
vram_L0 = min(xe_migrate_res_sizes(m, &vram_it), sysmem_L0);
|
||||
|
||||
xe_dbg(xe, "Pass %u, size: %llu\n", pass++, vram_L0);
|
||||
|
||||
pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
|
||||
batch_size += pte_update_size(m, pte_flags, vram, &vram_it, &vram_L0,
|
||||
&vram_L0_ofs, &vram_L0_pt, 0, 0, avail_pts);
|
||||
|
||||
batch_size += pte_update_size(m, 0, sysmem, &sysmem_it, &vram_L0, &sysmem_L0_ofs,
|
||||
&sysmem_L0_pt, 0, avail_pts, avail_pts);
|
||||
batch_size += EMIT_COPY_DW;
|
||||
|
||||
bb = xe_bb_new(gt, batch_size, usm);
|
||||
if (IS_ERR(bb)) {
|
||||
err = PTR_ERR(bb);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
if (xe_migrate_allow_identity(vram_L0, &vram_it))
|
||||
xe_res_next(&vram_it, vram_L0);
|
||||
else
|
||||
emit_pte(m, bb, vram_L0_pt, true, use_comp_pat, &vram_it, vram_L0, vram);
|
||||
|
||||
emit_pte(m, bb, sysmem_L0_pt, false, false, &sysmem_it, vram_L0, sysmem);
|
||||
|
||||
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
|
||||
update_idx = bb->len;
|
||||
|
||||
if (to_sysmem)
|
||||
emit_copy(gt, bb, vram_L0_ofs, sysmem_L0_ofs, vram_L0, XE_PAGE_SIZE);
|
||||
else
|
||||
emit_copy(gt, bb, sysmem_L0_ofs, vram_L0_ofs, vram_L0, XE_PAGE_SIZE);
|
||||
|
||||
job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm),
|
||||
update_idx);
|
||||
if (IS_ERR(job)) {
|
||||
xe_bb_free(bb, NULL);
|
||||
err = PTR_ERR(job);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
|
||||
|
||||
xe_assert(xe, dma_resv_test_signaled(vram_bo->ttm.base.resv,
|
||||
DMA_RESV_USAGE_BOOKKEEP));
|
||||
xe_assert(xe, dma_resv_test_signaled(sysmem_bo->ttm.base.resv,
|
||||
DMA_RESV_USAGE_BOOKKEEP));
|
||||
|
||||
scoped_guard(mutex, &m->job_mutex) {
|
||||
xe_sched_job_arm(job);
|
||||
dma_fence_put(fence);
|
||||
fence = dma_fence_get(&job->drm.s_fence->finished);
|
||||
xe_sched_job_push(job);
|
||||
|
||||
dma_fence_put(m->fence);
|
||||
m->fence = dma_fence_get(fence);
|
||||
}
|
||||
|
||||
xe_bb_free(bb, fence);
|
||||
size -= vram_L0;
|
||||
}
|
||||
|
||||
return fence;
|
||||
}
|
||||
|
||||
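A minimal sketch of how a caller might drive this helper to pull one VRAM chunk into a pinned system-memory buffer object and wait for the copy; the buffer objects, offsets and chunk size are assumptions for illustration, not the driver's actual call site:

	struct dma_fence *fence;
	long timeout;

	fence = xe_migrate_vram_copy_chunk(vram_bo, chunk_offset,
					   sysmem_bo, 0, chunk_size,
					   XE_MIGRATE_COPY_TO_SRAM);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* wait for the blitter copy before reading the sysmem BO contents */
	timeout = dma_fence_wait_timeout(fence, false, MAX_SCHEDULE_TIMEOUT);
	dma_fence_put(fence);
	if (timeout <= 0)
		return timeout ? timeout : -ETIME;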
static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
|
||||
u32 size, u32 pitch)
|
||||
{
|
||||
@@ -1912,11 +2035,6 @@ static bool xe_migrate_vram_use_pde(struct drm_pagemap_addr *sram_addr,
|
||||
return true;
|
||||
}
|
||||
|
||||
enum xe_migrate_copy_dir {
|
||||
XE_MIGRATE_COPY_TO_VRAM,
|
||||
XE_MIGRATE_COPY_TO_SRAM,
|
||||
};
|
||||
|
||||
#define XE_CACHELINE_BYTES 64ull
|
||||
#define XE_CACHELINE_MASK (XE_CACHELINE_BYTES - 1)
|
||||
|
||||
|
||||
@@ -28,6 +28,11 @@ struct xe_vma;
|
||||
|
||||
enum xe_sriov_vf_ccs_rw_ctxs;
|
||||
|
||||
enum xe_migrate_copy_dir {
|
||||
XE_MIGRATE_COPY_TO_VRAM,
|
||||
XE_MIGRATE_COPY_TO_SRAM,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct xe_migrate_pt_update_ops - Callbacks for the
|
||||
* xe_migrate_update_pgtables() function.
|
||||
@@ -131,6 +136,9 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
|
||||
|
||||
struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate);
|
||||
struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate);
|
||||
struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
|
||||
struct xe_bo *sysmem_bo, u64 sysmem_offset,
|
||||
u64 size, enum xe_migrate_copy_dir dir);
|
||||
int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
|
||||
unsigned long offset, void *buf, int len,
|
||||
int write);
|
||||
|
||||
@@ -870,7 +870,7 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
|
||||
|
||||
xe_oa_free_oa_buffer(stream);
|
||||
|
||||
xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
|
||||
xe_force_wake_put(gt_to_fw(gt), stream->fw_ref);
|
||||
xe_pm_runtime_put(stream->oa->xe);
|
||||
|
||||
/* Wa_1509372804:pvc: Unset the override of GUCRC mode to enable rc6 */
|
||||
@@ -1717,7 +1717,6 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
|
||||
struct xe_oa_open_param *param)
|
||||
{
|
||||
struct xe_gt *gt = param->hwe->gt;
|
||||
unsigned int fw_ref;
|
||||
int ret;
|
||||
|
||||
stream->exec_q = param->exec_q;
|
||||
@@ -1772,8 +1771,8 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
|
||||
|
||||
/* Take runtime pm ref and forcewake to disable RC6 */
|
||||
xe_pm_runtime_get(stream->oa->xe);
|
||||
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
|
||||
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
|
||||
stream->fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
|
||||
if (!xe_force_wake_ref_has_domain(stream->fw_ref, XE_FORCEWAKE_ALL)) {
|
||||
ret = -ETIMEDOUT;
|
||||
goto err_fw_put;
|
||||
}
|
||||
@@ -1818,7 +1817,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
|
||||
err_free_oa_buf:
|
||||
xe_oa_free_oa_buffer(stream);
|
||||
err_fw_put:
|
||||
xe_force_wake_put(gt_to_fw(gt), fw_ref);
|
||||
xe_force_wake_put(gt_to_fw(gt), stream->fw_ref);
|
||||
xe_pm_runtime_put(stream->oa->xe);
|
||||
if (stream->override_gucrc)
|
||||
xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(>->uc.guc.pc));
|
||||
|
||||
@@ -264,5 +264,8 @@ struct xe_oa_stream {
|
||||
|
||||
/** @syncs: syncs to wait on and to signal */
|
||||
struct xe_sync_entry *syncs;
|
||||
|
||||
/** @fw_ref: Forcewake reference */
|
||||
unsigned int fw_ref;
|
||||
};
|
||||
#endif
|
||||
|
||||
@@ -333,7 +333,7 @@ static const struct xe_device_desc mtl_desc = {
	.has_pxp = true,
	.max_gt_per_tile = 2,
	.va_bits = 48,
	.vm_max_level = 4,
	.vm_max_level = 3,
};

static const struct xe_device_desc lnl_desc = {
|
||||
|
||||
@@ -94,6 +94,20 @@ static int resize_vf_vram_bar(struct xe_device *xe, int num_vfs)
|
||||
return pci_iov_vf_bar_set_size(pdev, VF_LMEM_BAR, __fls(sizes));
|
||||
}
|
||||
|
||||
static int pf_prepare_vfs_enabling(struct xe_device *xe)
|
||||
{
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
/* make sure we are not locked-down by other components */
|
||||
return xe_sriov_pf_arm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, false, NULL);
|
||||
}
|
||||
|
||||
static void pf_finish_vfs_enabling(struct xe_device *xe)
|
||||
{
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
/* allow other components to lockdown VFs enabling */
|
||||
xe_sriov_pf_disarm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, false, NULL);
|
||||
}
|
||||
|
||||
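The prepare/finish pair above takes the guard in exclusive mode for as long as VFs stay enabled; a component that instead wants to lock down VF enabling would arm the same guard with lockdown set. A hedged sketch of that counterpart, with the calling context and owner pointer assumed for illustration:

	int err;

	/* fails with -EBUSY while VFs are enabled (guard held in exclusive mode) */
	err = xe_sriov_pf_arm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, true, current);
	if (err)
		return err;

	/* ... VF enabling is now locked down ... */

	xe_sriov_pf_disarm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, true, current);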
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
|
||||
@@ -109,6 +123,10 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = pf_prepare_vfs_enabling(xe);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* We must hold additional reference to the runtime PM to keep PF in D0
|
||||
* during VFs lifetime, as our VFs do not implement the PM capability.
|
||||
@@ -148,6 +166,7 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
|
||||
failed:
|
||||
xe_sriov_pf_unprovision_vfs(xe, num_vfs);
|
||||
xe_pm_runtime_put(xe);
|
||||
pf_finish_vfs_enabling(xe);
|
||||
out:
|
||||
xe_sriov_notice(xe, "Failed to enable %u VF%s (%pe)\n",
|
||||
num_vfs, str_plural(num_vfs), ERR_PTR(err));
|
||||
@@ -179,6 +198,8 @@ static int pf_disable_vfs(struct xe_device *xe)
|
||||
/* not needed anymore - see pf_enable_vfs() */
|
||||
xe_pm_runtime_put(xe);
|
||||
|
||||
pf_finish_vfs_enabling(xe);
|
||||
|
||||
xe_sriov_info(xe, "Disabled %u VF%s\n", num_vfs, str_plural(num_vfs));
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -32,27 +32,39 @@

static int pcode_mailbox_status(struct xe_tile *tile)
{
	const char *err_str;
	int err_decode;
	u32 err;
	static const struct pcode_err_decode err_decode[] = {
		[PCODE_ILLEGAL_CMD] = {-ENXIO, "Illegal Command"},
		[PCODE_TIMEOUT] = {-ETIMEDOUT, "Timed out"},
		[PCODE_ILLEGAL_DATA] = {-EINVAL, "Illegal Data"},
		[PCODE_ILLEGAL_SUBCOMMAND] = {-ENXIO, "Illegal Subcommand"},
		[PCODE_LOCKED] = {-EBUSY, "PCODE Locked"},
		[PCODE_GT_RATIO_OUT_OF_RANGE] = {-EOVERFLOW,
						 "GT ratio out of range"},
		[PCODE_REJECTED] = {-EACCES, "PCODE Rejected"},
		[PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
	};

#define CASE_ERR(_err, _err_decode, _err_str) \
	case _err: \
		err_decode = _err_decode; \
		err_str = _err_str; \
		break

	err = xe_mmio_read32(&tile->mmio, PCODE_MAILBOX) & PCODE_ERROR_MASK;
	switch (err) {
	CASE_ERR(PCODE_ILLEGAL_CMD, -ENXIO, "Illegal Command");
	CASE_ERR(PCODE_TIMEOUT, -ETIMEDOUT, "Timed out");
	CASE_ERR(PCODE_ILLEGAL_DATA, -EINVAL, "Illegal Data");
	CASE_ERR(PCODE_ILLEGAL_SUBCOMMAND, -ENXIO, "Illegal Subcommand");
	CASE_ERR(PCODE_LOCKED, -EBUSY, "PCODE Locked");
	CASE_ERR(PCODE_GT_RATIO_OUT_OF_RANGE, -EOVERFLOW, "GT ratio out of range");
	CASE_ERR(PCODE_REJECTED, -EACCES, "PCODE Rejected");
	default:
		err_decode = -EPROTO;
		err_str = "Unknown";
	}

	if (err) {
		drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s", err,
			err_decode[err].str ?: "Unknown");
		return err_decode[err].errno ?: -EPROTO;
		drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s",
			err_decode, err_str);

		return err_decode;
	}

	return 0;
#undef CASE_ERR
}
|
||||
|
||||
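For reference, each CASE_ERR() entry in the switch above expands to an ordinary case label; the PCODE_TIMEOUT entry, for example, becomes:

	case PCODE_TIMEOUT:
		err_decode = -ETIMEDOUT;
		err_str = "Timed out";
		break;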
static int __pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1,
|
||||
|
||||
@@ -92,9 +92,3 @@
|
||||
#define BMG_PCIE_CAP XE_REG(0x138340)
|
||||
#define LINK_DOWNGRADE REG_GENMASK(1, 0)
|
||||
#define DOWNGRADE_CAPABLE 2
|
||||
|
||||
struct pcode_err_decode {
|
||||
int errno;
|
||||
const char *str;
|
||||
};
|
||||
|
||||
|
||||
@@ -112,7 +112,7 @@ void xe_pm_might_block_on_suspend(void)
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_pm_might_block_on_suspend() - Block pending suspend.
|
||||
* xe_pm_block_on_suspend() - Block pending suspend.
|
||||
* @xe: The xe device about to be suspended.
|
||||
*
|
||||
* Block if the pm notifier has start evicting bos, to avoid
|
||||
|
||||
@@ -110,6 +110,10 @@ struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size,
|
||||
return drm_suballoc_new(&sa_manager->base, size, gfp, true, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sa_bo_flush_write() - Copy the data from the sub-allocation to the GPU memory.
|
||||
* @sa_bo: the &drm_suballoc to flush
|
||||
*/
|
||||
void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo)
|
||||
{
|
||||
struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
|
||||
@@ -123,6 +127,23 @@ void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo)
|
||||
drm_suballoc_size(sa_bo));
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sa_bo_sync_read() - Copy the data from GPU memory to the sub-allocation.
|
||||
* @sa_bo: the &drm_suballoc to sync
|
||||
*/
|
||||
void xe_sa_bo_sync_read(struct drm_suballoc *sa_bo)
|
||||
{
|
||||
struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
|
||||
struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);
|
||||
|
||||
if (!sa_manager->bo->vmap.is_iomem)
|
||||
return;
|
||||
|
||||
xe_map_memcpy_from(xe, xe_sa_bo_cpu_addr(sa_bo), &sa_manager->bo->vmap,
|
||||
drm_suballoc_soffset(sa_bo),
|
||||
drm_suballoc_size(sa_bo));
|
||||
}
|
||||
|
||||
void xe_sa_bo_free(struct drm_suballoc *sa_bo,
|
||||
struct dma_fence *fence)
|
||||
{
|
||||
|
||||
@@ -37,6 +37,7 @@ static inline struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager
|
||||
}
|
||||
|
||||
void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo);
|
||||
void xe_sa_bo_sync_read(struct drm_suballoc *sa_bo);
|
||||
void xe_sa_bo_free(struct drm_suballoc *sa_bo, struct dma_fence *fence);
|
||||
|
||||
static inline struct xe_sa_manager *
|
||||
|
||||
520
drivers/gpu/drm/xe/xe_sriov_packet.c
Normal file
@@ -0,0 +1,520 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
/*
|
||||
* Copyright © 2025 Intel Corporation
|
||||
*/
|
||||
|
||||
#include "xe_bo.h"
|
||||
#include "xe_device.h"
|
||||
#include "xe_guc_klv_helpers.h"
|
||||
#include "xe_printk.h"
|
||||
#include "xe_sriov_packet.h"
|
||||
#include "xe_sriov_packet_types.h"
|
||||
#include "xe_sriov_pf_helpers.h"
|
||||
#include "xe_sriov_pf_migration.h"
|
||||
#include "xe_sriov_printk.h"
|
||||
|
||||
static struct mutex *pf_migration_mutex(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
|
||||
|
||||
return &xe->sriov.pf.vfs[vfid].migration.lock;
|
||||
}
|
||||
|
||||
static struct xe_sriov_packet **pf_pick_pending(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
|
||||
lockdep_assert_held(pf_migration_mutex(xe, vfid));
|
||||
|
||||
return &xe->sriov.pf.vfs[vfid].migration.pending;
|
||||
}
|
||||
|
||||
static struct xe_sriov_packet **
|
||||
pf_pick_descriptor(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
|
||||
lockdep_assert_held(pf_migration_mutex(xe, vfid));
|
||||
|
||||
return &xe->sriov.pf.vfs[vfid].migration.descriptor;
|
||||
}
|
||||
|
||||
static struct xe_sriov_packet **pf_pick_trailer(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
|
||||
lockdep_assert_held(pf_migration_mutex(xe, vfid));
|
||||
|
||||
return &xe->sriov.pf.vfs[vfid].migration.trailer;
|
||||
}
|
||||
|
||||
static struct xe_sriov_packet **pf_pick_read_packet(struct xe_device *xe,
|
||||
unsigned int vfid)
|
||||
{
|
||||
struct xe_sriov_packet **data;
|
||||
|
||||
data = pf_pick_descriptor(xe, vfid);
|
||||
if (*data)
|
||||
return data;
|
||||
|
||||
data = pf_pick_pending(xe, vfid);
|
||||
if (!*data)
|
||||
*data = xe_sriov_pf_migration_save_consume(xe, vfid);
|
||||
if (*data)
|
||||
return data;
|
||||
|
||||
data = pf_pick_trailer(xe, vfid);
|
||||
if (*data)
|
||||
return data;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static bool pkt_needs_bo(struct xe_sriov_packet *data)
|
||||
{
|
||||
return data->hdr.type == XE_SRIOV_PACKET_TYPE_VRAM;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_packet_alloc() - Allocate migration data packet
|
||||
* @xe: the &xe_device
|
||||
*
|
||||
* Only allocates the "outer" structure, without initializing the migration
|
||||
* data backing storage.
|
||||
*
|
||||
* Return: Pointer to &xe_sriov_packet on success,
|
||||
* NULL in case of error.
|
||||
*/
|
||||
struct xe_sriov_packet *xe_sriov_packet_alloc(struct xe_device *xe)
|
||||
{
|
||||
struct xe_sriov_packet *data;
|
||||
|
||||
data = kzalloc(sizeof(*data), GFP_KERNEL);
|
||||
if (!data)
|
||||
return NULL;
|
||||
|
||||
data->xe = xe;
|
||||
data->hdr_remaining = sizeof(data->hdr);
|
||||
|
||||
return data;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_packet_free() - Free migration data packet.
|
||||
* @data: the &xe_sriov_packet
|
||||
*/
|
||||
void xe_sriov_packet_free(struct xe_sriov_packet *data)
|
||||
{
|
||||
if (IS_ERR_OR_NULL(data))
|
||||
return;
|
||||
|
||||
if (pkt_needs_bo(data))
|
||||
xe_bo_unpin_map_no_vm(data->bo);
|
||||
else
|
||||
kvfree(data->buff);
|
||||
|
||||
kfree(data);
|
||||
}
|
||||
|
||||
static int pkt_init(struct xe_sriov_packet *data)
|
||||
{
|
||||
struct xe_gt *gt = xe_device_get_gt(data->xe, data->hdr.gt_id);
|
||||
|
||||
if (!gt)
|
||||
return -EINVAL;
|
||||
|
||||
if (data->hdr.size == 0)
|
||||
return 0;
|
||||
|
||||
if (pkt_needs_bo(data)) {
|
||||
struct xe_bo *bo;
|
||||
|
||||
bo = xe_bo_create_pin_map_novm(data->xe, gt->tile, PAGE_ALIGN(data->hdr.size),
|
||||
ttm_bo_type_kernel,
|
||||
XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED, false);
|
||||
if (IS_ERR(bo))
|
||||
return PTR_ERR(bo);
|
||||
|
||||
data->bo = bo;
|
||||
data->vaddr = bo->vmap.vaddr;
|
||||
} else {
|
||||
void *buff = kvzalloc(data->hdr.size, GFP_KERNEL);
|
||||
|
||||
if (!buff)
|
||||
return -ENOMEM;
|
||||
|
||||
data->buff = buff;
|
||||
data->vaddr = buff;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define XE_SRIOV_PACKET_SUPPORTED_VERSION 1
|
||||
|
||||
/**
|
||||
* xe_sriov_packet_init() - Initialize migration packet header and backing storage.
|
||||
* @data: the &xe_sriov_packet
|
||||
* @tile_id: tile identifier
|
||||
* @gt_id: GT identifier
|
||||
* @type: &xe_sriov_packet_type
|
||||
* @offset: offset of data packet payload (within wider resource)
|
||||
* @size: size of data packet payload
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_sriov_packet_init(struct xe_sriov_packet *data, u8 tile_id, u8 gt_id,
|
||||
enum xe_sriov_packet_type type, loff_t offset, size_t size)
|
||||
{
|
||||
data->hdr.version = XE_SRIOV_PACKET_SUPPORTED_VERSION;
|
||||
data->hdr.type = type;
|
||||
data->hdr.tile_id = tile_id;
|
||||
data->hdr.gt_id = gt_id;
|
||||
data->hdr.offset = offset;
|
||||
data->hdr.size = size;
|
||||
data->remaining = size;
|
||||
|
||||
return pkt_init(data);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_packet_init_from_hdr() - Initialize migration packet backing storage based on header.
|
||||
* @data: the &xe_sriov_packet
|
||||
*
|
||||
* Header data is expected to be filled prior to calling this function.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_sriov_packet_init_from_hdr(struct xe_sriov_packet *data)
|
||||
{
|
||||
xe_assert(data->xe, !data->hdr_remaining);
|
||||
|
||||
if (data->hdr.version != XE_SRIOV_PACKET_SUPPORTED_VERSION)
|
||||
return -EINVAL;
|
||||
|
||||
data->remaining = data->hdr.size;
|
||||
|
||||
return pkt_init(data);
|
||||
}
|
||||
|
||||
static ssize_t pkt_hdr_read(struct xe_sriov_packet *data,
|
||||
char __user *buf, size_t len)
|
||||
{
|
||||
loff_t offset = sizeof(data->hdr) - data->hdr_remaining;
|
||||
|
||||
if (!data->hdr_remaining)
|
||||
return -EINVAL;
|
||||
|
||||
if (len > data->hdr_remaining)
|
||||
len = data->hdr_remaining;
|
||||
|
||||
if (copy_to_user(buf, (void *)&data->hdr + offset, len))
|
||||
return -EFAULT;
|
||||
|
||||
data->hdr_remaining -= len;
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t pkt_data_read(struct xe_sriov_packet *data,
|
||||
char __user *buf, size_t len)
|
||||
{
|
||||
if (len > data->remaining)
|
||||
len = data->remaining;
|
||||
|
||||
if (copy_to_user(buf, data->vaddr + (data->hdr.size - data->remaining), len))
|
||||
return -EFAULT;
|
||||
|
||||
data->remaining -= len;
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t pkt_read_single(struct xe_sriov_packet **data,
|
||||
unsigned int vfid, char __user *buf, size_t len)
|
||||
{
|
||||
ssize_t copied = 0;
|
||||
|
||||
if ((*data)->hdr_remaining)
|
||||
copied = pkt_hdr_read(*data, buf, len);
|
||||
else
|
||||
copied = pkt_data_read(*data, buf, len);
|
||||
|
||||
if ((*data)->remaining == 0 && (*data)->hdr_remaining == 0) {
|
||||
xe_sriov_packet_free(*data);
|
||||
*data = NULL;
|
||||
}
|
||||
|
||||
return copied;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_packet_read_single() - Read migration data from a single packet.
|
||||
* @xe: the &xe_device
|
||||
* @vfid: the VF identifier
|
||||
* @buf: start address of userspace buffer
|
||||
* @len: requested read size from userspace
|
||||
*
|
||||
* Return: number of bytes that has been successfully read,
|
||||
* 0 if no more migration data is available,
|
||||
* -errno on failure.
|
||||
*/
|
||||
ssize_t xe_sriov_packet_read_single(struct xe_device *xe, unsigned int vfid,
|
||||
char __user *buf, size_t len)
|
||||
{
|
||||
struct xe_sriov_packet **data = pf_pick_read_packet(xe, vfid);
|
||||
|
||||
if (!data)
|
||||
return -ENODATA;
|
||||
if (IS_ERR(*data))
|
||||
return PTR_ERR(*data);
|
||||
|
||||
return pkt_read_single(data, vfid, buf, len);
|
||||
}
|
||||
|
||||
static ssize_t pkt_hdr_write(struct xe_sriov_packet *data,
|
||||
const char __user *buf, size_t len)
|
||||
{
|
||||
loff_t offset = sizeof(data->hdr) - data->hdr_remaining;
|
||||
int ret;
|
||||
|
||||
if (len > data->hdr_remaining)
|
||||
len = data->hdr_remaining;
|
||||
|
||||
if (copy_from_user((void *)&data->hdr + offset, buf, len))
|
||||
return -EFAULT;
|
||||
|
||||
data->hdr_remaining -= len;
|
||||
|
||||
if (!data->hdr_remaining) {
|
||||
ret = xe_sriov_packet_init_from_hdr(data);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t pkt_data_write(struct xe_sriov_packet *data,
|
||||
const char __user *buf, size_t len)
|
||||
{
|
||||
if (len > data->remaining)
|
||||
len = data->remaining;
|
||||
|
||||
if (copy_from_user(data->vaddr + (data->hdr.size - data->remaining), buf, len))
|
||||
return -EFAULT;
|
||||
|
||||
data->remaining -= len;
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_packet_write_single() - Write migration data to a single packet.
|
||||
* @xe: the &xe_device
|
||||
* @vfid: the VF identifier
|
||||
* @buf: start address of userspace buffer
|
||||
* @len: requested write size from userspace
|
||||
*
|
||||
 * Return: number of bytes that have been successfully written,
|
||||
* -errno on failure.
|
||||
*/
|
||||
ssize_t xe_sriov_packet_write_single(struct xe_device *xe, unsigned int vfid,
|
||||
const char __user *buf, size_t len)
|
||||
{
|
||||
struct xe_sriov_packet **data = pf_pick_pending(xe, vfid);
|
||||
int ret;
|
||||
ssize_t copied;
|
||||
|
||||
if (IS_ERR_OR_NULL(*data)) {
|
||||
*data = xe_sriov_packet_alloc(xe);
|
||||
if (!*data)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if ((*data)->hdr_remaining)
|
||||
copied = pkt_hdr_write(*data, buf, len);
|
||||
else
|
||||
copied = pkt_data_write(*data, buf, len);
|
||||
|
||||
if ((*data)->hdr_remaining == 0 && (*data)->remaining == 0) {
|
||||
ret = xe_sriov_pf_migration_restore_produce(xe, vfid, *data);
|
||||
if (ret) {
|
||||
xe_sriov_packet_free(*data);
|
||||
return ret;
|
||||
}
|
||||
|
||||
*data = NULL;
|
||||
}
|
||||
|
||||
return copied;
|
||||
}
|
||||
|
||||
#define MIGRATION_KLV_DEVICE_DEVID_KEY 0xf001u
|
||||
#define MIGRATION_KLV_DEVICE_DEVID_LEN 1u
|
||||
#define MIGRATION_KLV_DEVICE_REVID_KEY 0xf002u
|
||||
#define MIGRATION_KLV_DEVICE_REVID_LEN 1u
|
||||
|
||||
#define MIGRATION_DESCRIPTOR_DWORDS (GUC_KLV_LEN_MIN + MIGRATION_KLV_DEVICE_DEVID_LEN + \
|
||||
GUC_KLV_LEN_MIN + MIGRATION_KLV_DEVICE_REVID_LEN)
|
||||
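/*
 * Illustrative layout of the descriptor built below (standard GuC KLV
 * encoding: one header dword per entry, followed by the value dwords):
 *
 *   dw0: key = MIGRATION_KLV_DEVICE_DEVID_KEY, len = 1
 *   dw1: PCI device id of the saving device
 *   dw2: key = MIGRATION_KLV_DEVICE_REVID_KEY, len = 1
 *   dw3: PCI revision id of the saving device
 */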
static int pf_descriptor_init(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
struct xe_sriov_packet **desc = pf_pick_descriptor(xe, vfid);
|
||||
struct xe_sriov_packet *data;
|
||||
unsigned int len = 0;
|
||||
u32 *klvs;
|
||||
int ret;
|
||||
|
||||
data = xe_sriov_packet_alloc(xe);
|
||||
if (!data)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = xe_sriov_packet_init(data, 0, 0, XE_SRIOV_PACKET_TYPE_DESCRIPTOR,
|
||||
0, MIGRATION_DESCRIPTOR_DWORDS * sizeof(u32));
|
||||
if (ret) {
|
||||
xe_sriov_packet_free(data);
|
||||
return ret;
|
||||
}
|
||||
|
||||
klvs = data->vaddr;
|
||||
klvs[len++] = PREP_GUC_KLV_CONST(MIGRATION_KLV_DEVICE_DEVID_KEY,
|
||||
MIGRATION_KLV_DEVICE_DEVID_LEN);
|
||||
klvs[len++] = xe->info.devid;
|
||||
klvs[len++] = PREP_GUC_KLV_CONST(MIGRATION_KLV_DEVICE_REVID_KEY,
|
||||
MIGRATION_KLV_DEVICE_REVID_LEN);
|
||||
klvs[len++] = xe->info.revid;
|
||||
|
||||
xe_assert(xe, len == MIGRATION_DESCRIPTOR_DWORDS);
|
||||
|
||||
*desc = data;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_packet_process_descriptor() - Process migration data descriptor packet.
|
||||
* @xe: the &xe_device
|
||||
* @vfid: the VF identifier
|
||||
* @data: the &xe_sriov_packet containing the descriptor
|
||||
*
|
||||
* The descriptor uses the same KLV format as GuC, and contains metadata used for
|
||||
* checking migration data compatibility.
|
||||
*
|
||||
* Return: 0 on success, -errno on failure.
|
||||
*/
|
||||
int xe_sriov_packet_process_descriptor(struct xe_device *xe, unsigned int vfid,
|
||||
struct xe_sriov_packet *data)
|
||||
{
|
||||
u32 num_dwords = data->hdr.size / sizeof(u32);
|
||||
u32 *klvs = data->vaddr;
|
||||
|
||||
xe_assert(xe, data->hdr.type == XE_SRIOV_PACKET_TYPE_DESCRIPTOR);
|
||||
|
||||
if (data->hdr.size % sizeof(u32)) {
|
||||
xe_sriov_warn(xe, "Aborting migration, descriptor not in KLV format (size=%llu)\n",
|
||||
data->hdr.size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
while (num_dwords >= GUC_KLV_LEN_MIN) {
|
||||
u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
|
||||
u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
|
||||
|
||||
klvs += GUC_KLV_LEN_MIN;
|
||||
num_dwords -= GUC_KLV_LEN_MIN;
|
||||
|
||||
if (len > num_dwords) {
|
||||
xe_sriov_warn(xe, "Aborting migration, truncated KLV %#x, len %u\n",
|
||||
key, len);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (key) {
|
||||
case MIGRATION_KLV_DEVICE_DEVID_KEY:
|
||||
if (*klvs != xe->info.devid) {
|
||||
xe_sriov_warn(xe,
|
||||
"Aborting migration, devid mismatch %#06x!=%#06x\n",
|
||||
*klvs, xe->info.devid);
|
||||
return -ENODEV;
|
||||
}
|
||||
break;
|
||||
case MIGRATION_KLV_DEVICE_REVID_KEY:
|
||||
if (*klvs != xe->info.revid) {
|
||||
xe_sriov_warn(xe,
|
||||
"Aborting migration, revid mismatch %#06x!=%#06x\n",
|
||||
*klvs, xe->info.revid);
|
||||
return -ENODEV;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
xe_sriov_dbg(xe,
|
||||
"Skipping unknown migration KLV %#x, len=%u\n",
|
||||
key, len);
|
||||
print_hex_dump_bytes("desc: ", DUMP_PREFIX_OFFSET, klvs,
|
||||
min(SZ_64, len * sizeof(u32)));
|
||||
break;
|
||||
}
|
||||
|
||||
klvs += len;
|
||||
num_dwords -= len;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pf_pending_init(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
struct xe_sriov_packet **data = pf_pick_pending(xe, vfid);
|
||||
|
||||
*data = NULL;
|
||||
}
|
||||
|
||||
#define MIGRATION_TRAILER_SIZE 0
|
||||
static int pf_trailer_init(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
struct xe_sriov_packet **trailer = pf_pick_trailer(xe, vfid);
|
||||
struct xe_sriov_packet *data;
|
||||
int ret;
|
||||
|
||||
data = xe_sriov_packet_alloc(xe);
|
||||
if (!data)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = xe_sriov_packet_init(data, 0, 0, XE_SRIOV_PACKET_TYPE_TRAILER,
|
||||
0, MIGRATION_TRAILER_SIZE);
|
||||
if (ret) {
|
||||
xe_sriov_packet_free(data);
|
||||
return ret;
|
||||
}
|
||||
|
||||
*trailer = data;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_packet_save_init() - Initialize the pending save migration packets.
|
||||
* @xe: the &xe_device
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* Return: 0 on success, -errno on failure.
|
||||
*/
|
||||
int xe_sriov_packet_save_init(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
int ret;
|
||||
|
||||
scoped_cond_guard(mutex_intr, return -EINTR, pf_migration_mutex(xe, vfid)) {
|
||||
ret = pf_descriptor_init(xe, vfid);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = pf_trailer_init(xe, vfid);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
pf_pending_init(xe, vfid);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
30
drivers/gpu/drm/xe/xe_sriov_packet.h
Normal file
@@ -0,0 +1,30 @@
|
||||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright © 2025 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef _XE_SRIOV_PACKET_H_
|
||||
#define _XE_SRIOV_PACKET_H_
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
struct xe_device;
|
||||
struct xe_sriov_packet;
|
||||
enum xe_sriov_packet_type;
|
||||
|
||||
struct xe_sriov_packet *xe_sriov_packet_alloc(struct xe_device *xe);
|
||||
void xe_sriov_packet_free(struct xe_sriov_packet *data);
|
||||
|
||||
int xe_sriov_packet_init(struct xe_sriov_packet *data, u8 tile_id, u8 gt_id,
|
||||
enum xe_sriov_packet_type, loff_t offset, size_t size);
|
||||
int xe_sriov_packet_init_from_hdr(struct xe_sriov_packet *data);
|
||||
|
||||
ssize_t xe_sriov_packet_read_single(struct xe_device *xe, unsigned int vfid,
|
||||
char __user *buf, size_t len);
|
||||
ssize_t xe_sriov_packet_write_single(struct xe_device *xe, unsigned int vfid,
|
||||
const char __user *buf, size_t len);
|
||||
int xe_sriov_packet_save_init(struct xe_device *xe, unsigned int vfid);
|
||||
int xe_sriov_packet_process_descriptor(struct xe_device *xe, unsigned int vfid,
|
||||
struct xe_sriov_packet *data);
|
||||
|
||||
#endif
|
||||
75
drivers/gpu/drm/xe/xe_sriov_packet_types.h
Normal file
@@ -0,0 +1,75 @@
|
||||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright © 2025 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef _XE_SRIOV_PACKET_TYPES_H_
|
||||
#define _XE_SRIOV_PACKET_TYPES_H_
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
/**
|
||||
* enum xe_sriov_packet_type - Xe SR-IOV VF migration data packet type
|
||||
* @XE_SRIOV_PACKET_TYPE_DESCRIPTOR: Descriptor with VF device metadata
|
||||
* @XE_SRIOV_PACKET_TYPE_TRAILER: Trailer indicating end-of-stream
|
||||
* @XE_SRIOV_PACKET_TYPE_GGTT: Global GTT migration data
|
||||
* @XE_SRIOV_PACKET_TYPE_MMIO: MMIO registers migration data
|
||||
* @XE_SRIOV_PACKET_TYPE_GUC: GuC firmware migration data
|
||||
* @XE_SRIOV_PACKET_TYPE_VRAM: VRAM migration data
|
||||
*/
|
||||
enum xe_sriov_packet_type {
|
||||
/* Skipping 0 to catch uninitialized data */
|
||||
XE_SRIOV_PACKET_TYPE_DESCRIPTOR = 1,
|
||||
XE_SRIOV_PACKET_TYPE_TRAILER,
|
||||
XE_SRIOV_PACKET_TYPE_GGTT,
|
||||
XE_SRIOV_PACKET_TYPE_MMIO,
|
||||
XE_SRIOV_PACKET_TYPE_GUC,
|
||||
XE_SRIOV_PACKET_TYPE_VRAM,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct xe_sriov_packet_hdr - Xe SR-IOV VF migration data packet header
|
||||
*/
|
||||
struct xe_sriov_packet_hdr {
|
||||
/** @version: migration data protocol version */
|
||||
u8 version;
|
||||
/** @type: migration data type */
|
||||
u8 type;
|
||||
/** @tile_id: migration data tile id */
|
||||
u8 tile_id;
|
||||
/** @gt_id: migration data gt id */
|
||||
u8 gt_id;
|
||||
/** @flags: migration data flags */
|
||||
u32 flags;
|
||||
/**
|
||||
* @offset: offset into the resource;
|
||||
* used when multiple packets of given type are used for migration
|
||||
*/
|
||||
u64 offset;
|
||||
/** @size: migration data size */
|
||||
u64 size;
|
||||
} __packed;
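
/* With __packed the header occupies a fixed 24 bytes: 4 * u8 + u32 flags + 2 * u64. */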
|
||||
|
||||
/**
|
||||
* struct xe_sriov_packet - Xe SR-IOV VF migration data packet
|
||||
*/
|
||||
struct xe_sriov_packet {
|
||||
/** @xe: the PF &xe_device this data packet belongs to */
|
||||
struct xe_device *xe;
|
||||
/** @vaddr: CPU pointer to payload data */
|
||||
void *vaddr;
|
||||
/** @remaining: payload data remaining */
|
||||
size_t remaining;
|
||||
/** @hdr_remaining: header data remaining */
|
||||
size_t hdr_remaining;
|
||||
union {
|
||||
/** @bo: Buffer object with migration data */
|
||||
struct xe_bo *bo;
|
||||
/** @buff: Buffer with migration data */
|
||||
void *buff;
|
||||
};
|
||||
/** @hdr: data packet header */
|
||||
struct xe_sriov_packet_hdr hdr;
|
||||
};
|
||||
|
||||
#endif
|
||||
@@ -15,6 +15,7 @@
|
||||
#include "xe_sriov.h"
|
||||
#include "xe_sriov_pf.h"
|
||||
#include "xe_sriov_pf_helpers.h"
|
||||
#include "xe_sriov_pf_migration.h"
|
||||
#include "xe_sriov_pf_service.h"
|
||||
#include "xe_sriov_pf_sysfs.h"
|
||||
#include "xe_sriov_printk.h"
|
||||
@@ -102,6 +103,12 @@ int xe_sriov_pf_init_early(struct xe_device *xe)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = xe_sriov_pf_migration_init(xe);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
xe_guard_init(&xe->sriov.pf.guard_vfs_enabling, "vfs_enabling");
|
||||
|
||||
xe_sriov_pf_service_init(xe);
|
||||
|
||||
return 0;
|
||||
@@ -162,6 +169,101 @@ int xe_sriov_pf_wait_ready(struct xe_device *xe)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_arm_guard() - Arm the guard for exclusive/lockdown mode.
|
||||
* @xe: the PF &xe_device
|
||||
* @guard: the &xe_guard to arm
|
||||
* @lockdown: arm for lockdown(true) or exclusive(false) mode
|
||||
* @who: the address of the new owner, or NULL if it's a caller
|
||||
*
|
||||
* This function can only be called on PF.
|
||||
*
|
||||
* It is a simple wrapper for xe_guard_arm() with additional debug
|
||||
* messages.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_sriov_pf_arm_guard(struct xe_device *xe, struct xe_guard *guard,
|
||||
bool lockdown, void *who)
|
||||
{
|
||||
void *new_owner = who ?: __builtin_return_address(0);
|
||||
int err;
|
||||
|
||||
err = xe_guard_arm(guard, lockdown, new_owner);
|
||||
if (err) {
|
||||
xe_sriov_dbg(xe, "%s/%s mode denied (%pe) last owner %ps\n",
|
||||
guard->name, xe_guard_mode_str(lockdown),
|
||||
ERR_PTR(err), guard->owner);
|
||||
return err;
|
||||
}
|
||||
|
||||
xe_sriov_dbg_verbose(xe, "%s/%s by %ps\n",
|
||||
guard->name, xe_guard_mode_str(lockdown),
|
||||
new_owner);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_disarm_guard() - Disarm the guard.
|
||||
* @xe: the PF &xe_device
|
||||
* @guard: the &xe_guard to disarm
|
||||
* @lockdown: disarm from lockdown(true) or exclusive(false) mode
|
||||
* @who: the address of the indirect owner, or NULL if it's a caller
|
||||
*
|
||||
* This function can only be called on PF.
|
||||
*
|
||||
* It is a simple wrapper for xe_guard_disarm() with additional debug
|
||||
* messages and xe_assert() to easily catch any illegal calls.
|
||||
*/
|
||||
void xe_sriov_pf_disarm_guard(struct xe_device *xe, struct xe_guard *guard,
|
||||
bool lockdown, void *who)
|
||||
{
|
||||
bool disarmed;
|
||||
|
||||
xe_sriov_dbg_verbose(xe, "%s/%s by %ps\n",
|
||||
guard->name, xe_guard_mode_str(lockdown),
|
||||
who ?: __builtin_return_address(0));
|
||||
|
||||
disarmed = xe_guard_disarm(guard, lockdown);
|
||||
xe_assert_msg(xe, disarmed, "%s/%s not armed? last owner %ps",
|
||||
guard->name, xe_guard_mode_str(lockdown), guard->owner);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_lockdown() - Lockdown the PF to prevent VFs enabling.
|
||||
* @xe: the PF &xe_device
|
||||
*
|
||||
* This function can only be called on PF.
|
||||
*
|
||||
 * Once the PF is locked down, it will not enable VFs.
 * If VFs are already enabled, -EBUSY will be returned.
 * To allow the PF to enable VFs again, call xe_sriov_pf_end_lockdown().
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_sriov_pf_lockdown(struct xe_device *xe)
|
||||
{
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
|
||||
return xe_sriov_pf_arm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, true,
|
||||
__builtin_return_address(0));
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_end_lockdown() - Allow the PF to enable VFs again.
|
||||
* @xe: the PF &xe_device
|
||||
*
|
||||
* This function can only be called on PF.
|
||||
* See xe_sriov_pf_lockdown() for details.
|
||||
*/
|
||||
void xe_sriov_pf_end_lockdown(struct xe_device *xe)
|
||||
{
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
|
||||
xe_sriov_pf_disarm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, true,
|
||||
__builtin_return_address(0));
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_print_vfs_summary - Print SR-IOV PF information.
|
||||
* @xe: the &xe_device to print info from
|
||||
|
||||
@@ -17,11 +17,15 @@ bool xe_sriov_pf_readiness(struct xe_device *xe);
|
||||
int xe_sriov_pf_init_early(struct xe_device *xe);
|
||||
int xe_sriov_pf_init_late(struct xe_device *xe);
|
||||
int xe_sriov_pf_wait_ready(struct xe_device *xe);
|
||||
int xe_sriov_pf_lockdown(struct xe_device *xe);
|
||||
void xe_sriov_pf_end_lockdown(struct xe_device *xe);
|
||||
void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p);
|
||||
#else
|
||||
static inline bool xe_sriov_pf_readiness(struct xe_device *xe) { return false; }
|
||||
static inline int xe_sriov_pf_init_early(struct xe_device *xe) { return 0; }
|
||||
static inline int xe_sriov_pf_init_late(struct xe_device *xe) { return 0; }
|
||||
static inline int xe_sriov_pf_lockdown(struct xe_device *xe) { return 0; }
|
||||
static inline void xe_sriov_pf_end_lockdown(struct xe_device *xe) { }
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
@@ -5,6 +5,8 @@
|
||||
|
||||
#include "xe_device.h"
|
||||
#include "xe_gt_sriov_pf_control.h"
|
||||
#include "xe_gt_sriov_pf_migration.h"
|
||||
#include "xe_sriov_packet.h"
|
||||
#include "xe_sriov_pf_control.h"
|
||||
#include "xe_sriov_printk.h"
|
||||
|
||||
@@ -121,6 +123,30 @@ int xe_sriov_pf_control_reset_vf(struct xe_device *xe, unsigned int vfid)
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_control_wait_flr() - Wait for a VF reset (FLR) to complete.
|
||||
* @xe: the &xe_device
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* This function is for PF only.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_sriov_pf_control_wait_flr(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
struct xe_gt *gt;
|
||||
unsigned int id;
|
||||
int result = 0;
|
||||
int err;
|
||||
|
||||
for_each_gt(gt, xe, id) {
|
||||
err = xe_gt_sriov_pf_control_wait_flr(gt, vfid);
|
||||
result = result ? -EUCLEAN : err;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_control_sync_flr() - Synchronize a VF FLR between all GTs.
|
||||
* @xe: the &xe_device
|
||||
@@ -149,3 +175,105 @@ int xe_sriov_pf_control_sync_flr(struct xe_device *xe, unsigned int vfid)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_control_trigger_save_vf() - Start VF migration data SAVE sequence on all GTs.
|
||||
* @xe: the &xe_device
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* This function is for PF only.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_sriov_pf_control_trigger_save_vf(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
struct xe_gt *gt;
|
||||
unsigned int id;
|
||||
int ret;
|
||||
|
||||
ret = xe_sriov_packet_save_init(xe, vfid);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
for_each_gt(gt, xe, id) {
|
||||
xe_gt_sriov_pf_migration_save_init(gt, vfid);
|
||||
|
||||
ret = xe_gt_sriov_pf_control_trigger_save_vf(gt, vfid);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_control_finish_save_vf() - Complete VF migration data SAVE sequence on all GTs.
|
||||
* @xe: the &xe_device
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* This function is for PF only.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_sriov_pf_control_finish_save_vf(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
struct xe_gt *gt;
|
||||
unsigned int id;
|
||||
int ret;
|
||||
|
||||
for_each_gt(gt, xe, id) {
|
||||
ret = xe_gt_sriov_pf_control_finish_save_vf(gt, vfid);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_control_trigger_restore_vf() - Start VF migration data RESTORE sequence on all GTs.
|
||||
* @xe: the &xe_device
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* This function is for PF only.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_sriov_pf_control_trigger_restore_vf(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
struct xe_gt *gt;
|
||||
unsigned int id;
|
||||
int ret;
|
||||
|
||||
for_each_gt(gt, xe, id) {
|
||||
ret = xe_gt_sriov_pf_control_trigger_restore_vf(gt, vfid);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_control_finish_restore_vf() - Complete VF migration data RESTORE sequence on all GTs.
|
||||
* @xe: the &xe_device
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* This function is for PF only.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_sriov_pf_control_finish_restore_vf(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
struct xe_gt *gt;
|
||||
unsigned int id;
|
||||
int ret;
|
||||
|
||||
for_each_gt(gt, xe, id) {
|
||||
ret = xe_gt_sriov_pf_control_finish_restore_vf(gt, vfid);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -12,6 +12,11 @@ int xe_sriov_pf_control_pause_vf(struct xe_device *xe, unsigned int vfid);
|
||||
int xe_sriov_pf_control_resume_vf(struct xe_device *xe, unsigned int vfid);
|
||||
int xe_sriov_pf_control_stop_vf(struct xe_device *xe, unsigned int vfid);
|
||||
int xe_sriov_pf_control_reset_vf(struct xe_device *xe, unsigned int vfid);
|
||||
int xe_sriov_pf_control_wait_flr(struct xe_device *xe, unsigned int vfid);
|
||||
int xe_sriov_pf_control_sync_flr(struct xe_device *xe, unsigned int vfid);
|
||||
int xe_sriov_pf_control_trigger_save_vf(struct xe_device *xe, unsigned int vfid);
|
||||
int xe_sriov_pf_control_finish_save_vf(struct xe_device *xe, unsigned int vfid);
|
||||
int xe_sriov_pf_control_trigger_restore_vf(struct xe_device *xe, unsigned int vfid);
|
||||
int xe_sriov_pf_control_finish_restore_vf(struct xe_device *xe, unsigned int vfid);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
#include "xe_sriov_pf_control.h"
|
||||
#include "xe_sriov_pf_debugfs.h"
|
||||
#include "xe_sriov_pf_helpers.h"
|
||||
#include "xe_sriov_pf_migration.h"
|
||||
#include "xe_sriov_pf_provision.h"
|
||||
#include "xe_sriov_pf_service.h"
|
||||
#include "xe_sriov_printk.h"
|
||||
@@ -98,10 +99,40 @@ static inline int xe_sriov_pf_restore_auto_provisioning(struct xe_device *xe)
|
||||
|
||||
DEFINE_SRIOV_ATTRIBUTE(restore_auto_provisioning);
|
||||
|
||||
static int lockdown_vfs_enabling_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct dentry *dent = file_dentry(file);
|
||||
struct xe_device *xe = extract_xe(dent);
|
||||
	int ret;
|
||||
|
||||
ret = xe_sriov_pf_lockdown(xe);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
file->private_data = xe;
|
||||
return nonseekable_open(inode, file);
|
||||
}
|
||||
|
||||
static int lockdown_vfs_enabling_release(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct xe_device *xe = file->private_data;
|
||||
|
||||
xe_sriov_pf_end_lockdown(xe);
|
||||
return 0;
|
||||
}
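
/* The PF stays locked down only for as long as this debugfs file is kept open. */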
|
||||
|
||||
static const struct file_operations lockdown_vfs_enabling_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = lockdown_vfs_enabling_open,
|
||||
.release = lockdown_vfs_enabling_release,
|
||||
};
|
||||
|
||||
static void pf_populate_root(struct xe_device *xe, struct dentry *dent)
|
||||
{
|
||||
debugfs_create_file("restore_auto_provisioning", 0200, dent, xe,
|
||||
&restore_auto_provisioning_fops);
|
||||
debugfs_create_file("lockdown_vfs_enabling", 0400, dent, xe,
|
||||
&lockdown_vfs_enabling_fops);
|
||||
}
|
||||
|
||||
static int simple_show(struct seq_file *m, void *data)
|
||||
@@ -132,15 +163,36 @@ static void pf_populate_pf(struct xe_device *xe, struct dentry *pfdent)
|
||||
* /sys/kernel/debug/dri/BDF/
|
||||
* ├── sriov
|
||||
* │ ├── vf1
|
||||
* │ │ ├── migration_data
|
||||
* │ │ ├── pause
|
||||
* │ │ ├── reset
|
||||
* │ │ ├── resume
|
||||
* │ │ ├── stop
|
||||
* │ │ ├── save
|
||||
* │ │ ├── restore
|
||||
* │ │ :
|
||||
* │ ├── vf2
|
||||
* │ │ ├── ...
|
||||
*/
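
/*
 * Illustrative use of the per-VF files above: "save" and "restore" trigger
 * the respective sequence on write and complete it on read, "migration_data"
 * carries the migration stream itself (read until it returns 0 on save,
 * written back on restore), and "migration_size" reports the total migration
 * data size in bytes.
 */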
|
||||
|
||||
static int from_file_read_to_vf_call(struct seq_file *s,
|
||||
int (*call)(struct xe_device *, unsigned int))
|
||||
{
|
||||
struct dentry *dent = file_dentry(s->file)->d_parent;
|
||||
struct xe_device *xe = extract_xe(dent);
|
||||
unsigned int vfid = extract_vfid(dent);
|
||||
int ret;
|
||||
|
||||
xe_pm_runtime_get(xe);
|
||||
ret = call(xe, vfid);
|
||||
xe_pm_runtime_put(xe);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t from_file_write_to_vf_call(struct file *file, const char __user *userbuf,
|
||||
size_t count, loff_t *ppos,
|
||||
int (*call)(struct xe_device *, unsigned int))
|
||||
@@ -179,10 +231,85 @@ static ssize_t OP##_write(struct file *file, const char __user *userbuf, \
|
||||
} \
|
||||
DEFINE_SHOW_STORE_ATTRIBUTE(OP)
|
||||
|
||||
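/*
 * RW control variants: a write triggers the operation via
 * xe_sriov_pf_control_trigger_*() and a read completes it via
 * xe_sriov_pf_control_finish_*().
 */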
#define DEFINE_VF_CONTROL_ATTRIBUTE_RW(OP) \
|
||||
static int OP##_show(struct seq_file *s, void *unused) \
|
||||
{ \
|
||||
return from_file_read_to_vf_call(s, \
|
||||
xe_sriov_pf_control_finish_##OP); \
|
||||
} \
|
||||
static ssize_t OP##_write(struct file *file, const char __user *userbuf, \
|
||||
size_t count, loff_t *ppos) \
|
||||
{ \
|
||||
return from_file_write_to_vf_call(file, userbuf, count, ppos, \
|
||||
xe_sriov_pf_control_trigger_##OP); \
|
||||
} \
|
||||
DEFINE_SHOW_STORE_ATTRIBUTE(OP)
|
||||
|
||||
DEFINE_VF_CONTROL_ATTRIBUTE(pause_vf);
|
||||
DEFINE_VF_CONTROL_ATTRIBUTE(resume_vf);
|
||||
DEFINE_VF_CONTROL_ATTRIBUTE(stop_vf);
|
||||
DEFINE_VF_CONTROL_ATTRIBUTE(reset_vf);
|
||||
DEFINE_VF_CONTROL_ATTRIBUTE_RW(save_vf);
|
||||
DEFINE_VF_CONTROL_ATTRIBUTE_RW(restore_vf);
|
||||
|
||||
static ssize_t data_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
|
||||
{
|
||||
struct dentry *dent = file_dentry(file)->d_parent;
|
||||
struct xe_device *xe = extract_xe(dent);
|
||||
unsigned int vfid = extract_vfid(dent);
|
||||
|
||||
if (*pos)
|
||||
return -ESPIPE;
|
||||
|
||||
return xe_sriov_pf_migration_write(xe, vfid, buf, count);
|
||||
}
|
||||
|
||||
static ssize_t data_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
|
||||
{
|
||||
struct dentry *dent = file_dentry(file)->d_parent;
|
||||
struct xe_device *xe = extract_xe(dent);
|
||||
unsigned int vfid = extract_vfid(dent);
|
||||
|
||||
if (*ppos)
|
||||
return -ESPIPE;
|
||||
|
||||
return xe_sriov_pf_migration_read(xe, vfid, buf, count);
|
||||
}
|
||||
|
||||
static const struct file_operations data_vf_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = simple_open,
|
||||
.write = data_write,
|
||||
.read = data_read,
|
||||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
static ssize_t size_read(struct file *file, char __user *ubuf, size_t count, loff_t *ppos)
|
||||
{
|
||||
struct dentry *dent = file_dentry(file)->d_parent;
|
||||
struct xe_device *xe = extract_xe(dent);
|
||||
unsigned int vfid = extract_vfid(dent);
|
||||
char buf[21];
|
||||
ssize_t ret;
|
||||
int len;
|
||||
|
||||
xe_pm_runtime_get(xe);
|
||||
ret = xe_sriov_pf_migration_size(xe, vfid);
|
||||
xe_pm_runtime_put(xe);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
len = scnprintf(buf, sizeof(buf), "%zd\n", ret);
|
||||
|
||||
return simple_read_from_buffer(ubuf, count, ppos, buf, len);
|
||||
}
|
||||
|
||||
static const struct file_operations size_vf_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = simple_open,
|
||||
.read = size_read,
|
||||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
static void pf_populate_vf(struct xe_device *xe, struct dentry *vfdent)
|
||||
{
|
||||
@@ -190,6 +317,10 @@ static void pf_populate_vf(struct xe_device *xe, struct dentry *vfdent)
|
||||
debugfs_create_file("resume", 0200, vfdent, xe, &resume_vf_fops);
|
||||
debugfs_create_file("stop", 0200, vfdent, xe, &stop_vf_fops);
|
||||
debugfs_create_file("reset", 0200, vfdent, xe, &reset_vf_fops);
|
||||
debugfs_create_file("save", 0600, vfdent, xe, &save_vf_fops);
|
||||
debugfs_create_file("restore", 0600, vfdent, xe, &restore_vf_fops);
|
||||
debugfs_create_file("migration_data", 0600, vfdent, xe, &data_vf_fops);
|
||||
debugfs_create_file("migration_size", 0400, vfdent, xe, &size_vf_fops);
|
||||
}
|
||||
|
||||
static void pf_populate_with_tiles(struct xe_device *xe, struct dentry *dent, unsigned int vfid)
|
||||
|
||||
@@ -48,10 +48,26 @@ static inline unsigned int xe_sriov_pf_num_vfs(const struct xe_device *xe)
|
||||
return pci_num_vf(to_pci_dev(xe->drm.dev));
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_admin_only() - Check if PF is mainly used for VFs administration.
|
||||
* @xe: the PF &xe_device
|
||||
*
|
||||
* Return: True if PF is mainly used for VFs administration.
|
||||
*/
|
||||
static inline bool xe_sriov_pf_admin_only(const struct xe_device *xe)
|
||||
{
|
||||
return !xe->info.probe_display;
|
||||
}
|
||||
|
||||
static inline struct mutex *xe_sriov_pf_master_mutex(struct xe_device *xe)
|
||||
{
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
return &xe->sriov.pf.master_lock;
|
||||
}
|
||||
|
||||
int xe_sriov_pf_arm_guard(struct xe_device *xe, struct xe_guard *guard,
|
||||
bool write, void *who);
|
||||
void xe_sriov_pf_disarm_guard(struct xe_device *xe, struct xe_guard *guard,
|
||||
bool write, void *who);
|
||||
|
||||
#endif
|
||||
|
||||
342
drivers/gpu/drm/xe/xe_sriov_pf_migration.c
Normal file
@@ -0,0 +1,342 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
/*
|
||||
* Copyright © 2025 Intel Corporation
|
||||
*/
|
||||
|
||||
#include <drm/drm_managed.h>
|
||||
|
||||
#include "xe_device.h"
|
||||
#include "xe_gt_sriov_pf_control.h"
|
||||
#include "xe_gt_sriov_pf_migration.h"
|
||||
#include "xe_pm.h"
|
||||
#include "xe_sriov.h"
|
||||
#include "xe_sriov_packet.h"
|
||||
#include "xe_sriov_packet_types.h"
|
||||
#include "xe_sriov_pf_helpers.h"
|
||||
#include "xe_sriov_pf_migration.h"
|
||||
#include "xe_sriov_printk.h"
|
||||
|
||||
static struct xe_sriov_migration_state *pf_pick_migration(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
|
||||
|
||||
return &xe->sriov.pf.vfs[vfid].migration;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_migration_waitqueue() - Get waitqueue for migration.
|
||||
* @xe: the &xe_device
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* Return: pointer to the migration waitqueue.
|
||||
*/
|
||||
wait_queue_head_t *xe_sriov_pf_migration_waitqueue(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
return &pf_pick_migration(xe, vfid)->wq;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_migration_supported() - Check if SR-IOV VF migration is supported by the device
|
||||
* @xe: the &xe_device
|
||||
*
|
||||
* Return: true if migration is supported, false otherwise
|
||||
*/
|
||||
bool xe_sriov_pf_migration_supported(struct xe_device *xe)
|
||||
{
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
|
||||
return xe->sriov.pf.migration.supported;
|
||||
}
|
||||
|
||||
static bool pf_check_migration_support(struct xe_device *xe)
|
||||
{
|
||||
/* XXX: for now this is for feature enabling only */
|
||||
return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
|
||||
}
|
||||
|
||||
static void pf_migration_cleanup(void *arg)
|
||||
{
|
||||
struct xe_sriov_migration_state *migration = arg;
|
||||
|
||||
xe_sriov_packet_free(migration->pending);
|
||||
xe_sriov_packet_free(migration->trailer);
|
||||
xe_sriov_packet_free(migration->descriptor);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_migration_init() - Initialize support for SR-IOV VF migration.
|
||||
* @xe: the &xe_device
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_sriov_pf_migration_init(struct xe_device *xe)
|
||||
{
|
||||
unsigned int n, totalvfs;
|
||||
int err;
|
||||
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
|
||||
xe->sriov.pf.migration.supported = pf_check_migration_support(xe);
|
||||
if (!xe_sriov_pf_migration_supported(xe))
|
||||
return 0;
|
||||
|
||||
totalvfs = xe_sriov_pf_get_totalvfs(xe);
|
||||
for (n = 1; n <= totalvfs; n++) {
|
||||
struct xe_sriov_migration_state *migration = pf_pick_migration(xe, n);
|
||||
|
||||
err = drmm_mutex_init(&xe->drm, &migration->lock);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
init_waitqueue_head(&migration->wq);
|
||||
|
||||
err = devm_add_action_or_reset(xe->drm.dev, pf_migration_cleanup, migration);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool pf_migration_data_ready(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
struct xe_gt *gt;
|
||||
u8 gt_id;
|
||||
|
||||
for_each_gt(gt, xe, gt_id) {
|
||||
if (xe_gt_sriov_pf_control_check_save_failed(gt, vfid) ||
|
||||
xe_gt_sriov_pf_control_check_save_data_done(gt, vfid) ||
|
||||
!xe_gt_sriov_pf_migration_ring_empty(gt, vfid))
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static struct xe_sriov_packet *
|
||||
pf_migration_consume(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
struct xe_sriov_packet *data;
|
||||
bool more_data = false;
|
||||
struct xe_gt *gt;
|
||||
u8 gt_id;
|
||||
|
||||
for_each_gt(gt, xe, gt_id) {
|
||||
data = xe_gt_sriov_pf_migration_save_consume(gt, vfid);
|
||||
		if (data && PTR_ERR(data) != -EAGAIN)
|
||||
return data;
|
||||
if (PTR_ERR(data) == -EAGAIN)
|
||||
more_data = true;
|
||||
}
|
||||
|
||||
if (!more_data)
|
||||
return NULL;
|
||||
|
||||
return ERR_PTR(-EAGAIN);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_migration_save_consume() - Consume a VF migration data packet from the device.
|
||||
* @xe: the &xe_device
|
||||
* @vfid: the VF identifier
|
||||
*
|
||||
* Called by the save migration data consumer (userspace) when
|
||||
* processing migration data.
|
||||
* If there is no migration data to process, wait until more data is available.
|
||||
*
|
||||
* Return: Pointer to &xe_sriov_packet on success,
|
||||
* NULL if ring is empty and no more migration data is expected,
|
||||
* ERR_PTR value in case of error.
|
||||
*/
|
||||
struct xe_sriov_packet *
|
||||
xe_sriov_pf_migration_save_consume(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
struct xe_sriov_migration_state *migration = pf_pick_migration(xe, vfid);
|
||||
struct xe_sriov_packet *data;
|
||||
int ret;
|
||||
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
|
||||
for (;;) {
|
||||
data = pf_migration_consume(xe, vfid);
|
||||
if (PTR_ERR(data) != -EAGAIN)
|
||||
break;
|
||||
|
||||
ret = wait_event_interruptible(migration->wq,
|
||||
pf_migration_data_ready(xe, vfid));
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
return data;
|
||||
}
|
||||
|
||||
static int pf_handle_descriptor(struct xe_device *xe, unsigned int vfid,
|
||||
struct xe_sriov_packet *data)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (data->hdr.tile_id != 0 || data->hdr.gt_id != 0)
|
||||
return -EINVAL;
|
||||
|
||||
ret = xe_sriov_packet_process_descriptor(xe, vfid, data);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
xe_sriov_packet_free(data);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
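/* An empty TRAILER packet (no payload) marks the end of the restore stream. */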
static int pf_handle_trailer(struct xe_device *xe, unsigned int vfid,
|
||||
struct xe_sriov_packet *data)
|
||||
{
|
||||
struct xe_gt *gt;
|
||||
u8 gt_id;
|
||||
|
||||
if (data->hdr.tile_id != 0 || data->hdr.gt_id != 0)
|
||||
return -EINVAL;
|
||||
if (data->hdr.offset != 0 || data->hdr.size != 0 || data->buff || data->bo)
|
||||
return -EINVAL;
|
||||
|
||||
xe_sriov_packet_free(data);
|
||||
|
||||
for_each_gt(gt, xe, gt_id)
|
||||
xe_gt_sriov_pf_control_restore_data_done(gt, vfid);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_migration_restore_produce() - Produce a VF migration data packet to the device.
|
||||
* @xe: the &xe_device
|
||||
* @vfid: the VF identifier
|
||||
* @data: Pointer to &xe_sriov_packet
|
||||
*
|
||||
* Called by the restore migration data producer (userspace) when processing
|
||||
* migration data.
|
||||
* If the underlying data structure is full, wait until there is space.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfid,
|
||||
struct xe_sriov_packet *data)
|
||||
{
|
||||
struct xe_gt *gt;
|
||||
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
|
||||
if (data->hdr.type == XE_SRIOV_PACKET_TYPE_DESCRIPTOR)
|
||||
return pf_handle_descriptor(xe, vfid, data);
|
||||
if (data->hdr.type == XE_SRIOV_PACKET_TYPE_TRAILER)
|
||||
return pf_handle_trailer(xe, vfid, data);
|
||||
|
||||
gt = xe_device_get_gt(xe, data->hdr.gt_id);
|
||||
if (!gt || data->hdr.tile_id != gt->tile->id || data->hdr.type == 0) {
|
||||
xe_sriov_err_ratelimited(xe, "Received invalid restore packet for VF%u (type:%u, tile:%u, GT:%u)\n",
|
||||
vfid, data->hdr.type, data->hdr.tile_id, data->hdr.gt_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return xe_gt_sriov_pf_migration_restore_produce(gt, vfid, data);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_migration_read() - Read migration data from the device.
|
||||
* @xe: the &xe_device
|
||||
* @vfid: the VF identifier
|
||||
* @buf: start address of userspace buffer
|
||||
* @len: requested read size from userspace
|
||||
*
|
||||
 * Return: number of bytes that have been successfully read,
|
||||
* 0 if no more migration data is available,
|
||||
* -errno on failure.
|
||||
*/
|
||||
ssize_t xe_sriov_pf_migration_read(struct xe_device *xe, unsigned int vfid,
|
||||
char __user *buf, size_t len)
|
||||
{
|
||||
struct xe_sriov_migration_state *migration = pf_pick_migration(xe, vfid);
|
||||
ssize_t ret, consumed = 0;
|
||||
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
|
||||
scoped_cond_guard(mutex_intr, return -EINTR, &migration->lock) {
|
||||
while (consumed < len) {
|
||||
ret = xe_sriov_packet_read_single(xe, vfid, buf, len - consumed);
|
||||
if (ret == -ENODATA)
|
||||
break;
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
consumed += ret;
|
||||
buf += ret;
|
||||
}
|
||||
}
|
||||
|
||||
return consumed;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_migration_write() - Write migration data to the device.
|
||||
* @xe: the &xe_device
|
||||
* @vfid: the VF identifier
|
||||
* @buf: start address of userspace buffer
|
||||
* @len: requested write size from userspace
|
||||
*
|
||||
 * Return: number of bytes that have been successfully written,
|
||||
* -errno on failure.
|
||||
*/
|
||||
ssize_t xe_sriov_pf_migration_write(struct xe_device *xe, unsigned int vfid,
|
||||
const char __user *buf, size_t len)
|
||||
{
|
||||
struct xe_sriov_migration_state *migration = pf_pick_migration(xe, vfid);
|
||||
ssize_t ret, produced = 0;
|
||||
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
|
||||
scoped_cond_guard(mutex_intr, return -EINTR, &migration->lock) {
|
||||
while (produced < len) {
|
||||
ret = xe_sriov_packet_write_single(xe, vfid, buf, len - produced);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
produced += ret;
|
||||
buf += ret;
|
||||
}
|
||||
}
|
||||
|
||||
return produced;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_pf_migration_size() - Total size of migration data from all components within a device
|
||||
* @xe: the &xe_device
|
||||
* @vfid: the VF identifier (can't be 0)
|
||||
*
|
||||
* This function is for PF only.
|
||||
*
|
||||
* Return: total migration data size in bytes or a negative error code on failure.
|
||||
*/
|
||||
ssize_t xe_sriov_pf_migration_size(struct xe_device *xe, unsigned int vfid)
|
||||
{
|
||||
size_t size = 0;
|
||||
struct xe_gt *gt;
|
||||
ssize_t ret;
|
||||
u8 gt_id;
|
||||
|
||||
xe_assert(xe, IS_SRIOV_PF(xe));
|
||||
xe_assert(xe, vfid);
|
||||
|
||||
for_each_gt(gt, xe, gt_id) {
|
||||
ret = xe_gt_sriov_pf_migration_size(gt, vfid);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
size += ret;
|
||||
}
|
||||
|
||||
return size;
|
||||
}
|
||||
29
drivers/gpu/drm/xe/xe_sriov_pf_migration.h
Normal file
@@ -0,0 +1,29 @@
|
||||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright © 2025 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef _XE_SRIOV_PF_MIGRATION_H_
|
||||
#define _XE_SRIOV_PF_MIGRATION_H_
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/wait.h>
|
||||
|
||||
struct xe_device;
|
||||
struct xe_sriov_packet;
|
||||
|
||||
int xe_sriov_pf_migration_init(struct xe_device *xe);
|
||||
bool xe_sriov_pf_migration_supported(struct xe_device *xe);
|
||||
int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfid,
|
||||
struct xe_sriov_packet *data);
|
||||
struct xe_sriov_packet *
|
||||
xe_sriov_pf_migration_save_consume(struct xe_device *xe, unsigned int vfid);
|
||||
ssize_t xe_sriov_pf_migration_size(struct xe_device *xe, unsigned int vfid);
|
||||
wait_queue_head_t *xe_sriov_pf_migration_waitqueue(struct xe_device *xe, unsigned int vfid);
|
||||
|
||||
ssize_t xe_sriov_pf_migration_read(struct xe_device *xe, unsigned int vfid,
|
||||
char __user *buf, size_t len);
|
||||
ssize_t xe_sriov_pf_migration_write(struct xe_device *xe, unsigned int vfid,
|
||||
const char __user *buf, size_t len);
|
||||
|
||||
#endif
|
||||
37
drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h
Normal file
@@ -0,0 +1,37 @@
|
||||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright © 2025 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef _XE_SRIOV_PF_MIGRATION_TYPES_H_
|
||||
#define _XE_SRIOV_PF_MIGRATION_TYPES_H_
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/mutex_types.h>
|
||||
#include <linux/wait.h>
|
||||
|
||||
/**
|
||||
* struct xe_sriov_pf_migration - Xe device level VF migration data
|
||||
*/
|
||||
struct xe_sriov_pf_migration {
|
||||
/** @supported: indicates whether VF migration feature is supported */
|
||||
bool supported;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct xe_sriov_migration_state - Per VF device-level migration related data
|
||||
*/
|
||||
struct xe_sriov_migration_state {
|
||||
/** @wq: waitqueue used to avoid busy-waiting for snapshot production/consumption */
|
||||
wait_queue_head_t wq;
|
||||
/** @lock: Mutex protecting the migration data */
|
||||
struct mutex lock;
|
||||
/** @pending: currently processed data packet of VF resource */
|
||||
struct xe_sriov_packet *pending;
|
||||
/** @trailer: data packet used to indicate the end of stream */
|
||||
struct xe_sriov_packet *trailer;
|
||||
/** @descriptor: data packet containing the metadata describing the device */
|
||||
struct xe_sriov_packet *descriptor;
|
||||
};
|
||||
|
||||
#endif
|
||||
@@ -9,6 +9,8 @@
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#include "xe_guard.h"
|
||||
#include "xe_sriov_pf_migration_types.h"
|
||||
#include "xe_sriov_pf_provision_types.h"
|
||||
#include "xe_sriov_pf_service_types.h"
|
||||
|
||||
@@ -23,6 +25,8 @@ struct xe_sriov_metadata {
|
||||
|
||||
/** @version: negotiated VF/PF ABI version */
|
||||
struct xe_sriov_pf_service_version version;
|
||||
/** @migration: migration state */
|
||||
struct xe_sriov_migration_state migration;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -38,12 +42,18 @@ struct xe_device_pf {
|
||||
/** @driver_max_vfs: Maximum number of VFs supported by the driver. */
|
||||
u16 driver_max_vfs;
|
||||
|
||||
/** @guard_vfs_enabling: guards VFs enabling */
|
||||
struct xe_guard guard_vfs_enabling;
|
||||
|
||||
/** @master_lock: protects all VFs configurations across GTs */
|
||||
struct mutex master_lock;
|
||||
|
||||
/** @provision: device level provisioning data. */
|
||||
struct xe_sriov_pf_provision provision;
|
||||
|
||||
/** @migration: device level migration data. */
|
||||
struct xe_sriov_pf_migration migration;
|
||||
|
||||
/** @service: device level service data. */
|
||||
struct xe_sriov_pf_service service;
|
||||
|
||||
|
||||
@@ -156,14 +156,6 @@ void xe_sriov_vf_migration_disable(struct xe_device *xe, const char *fmt, ...)
|
||||
|
||||
static void vf_migration_init_early(struct xe_device *xe)
|
||||
{
|
||||
/*
|
||||
* TODO: Add conditions to allow specific platforms, when they're
|
||||
* supported at production quality.
|
||||
*/
|
||||
if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG))
|
||||
return xe_sriov_vf_migration_disable(xe,
|
||||
"experimental feature not available on production builds");
|
||||
|
||||
if (!xe_device_has_memirq(xe))
|
||||
return xe_sriov_vf_migration_disable(xe, "requires memory-based IRQ support");
|
||||
|
||||
|
||||
@@ -183,12 +183,17 @@ static int determine_lmem_bar_size(struct xe_device *xe, struct xe_vram_region *
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size)
|
||||
static int get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size, u64 *poffset)
|
||||
{
|
||||
struct xe_device *xe = gt_to_xe(gt);
|
||||
unsigned int fw_ref;
|
||||
u64 offset;
|
||||
u32 reg;
|
||||
|
||||
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
|
||||
if (!fw_ref)
|
||||
return -ETIMEDOUT;
|
||||
|
||||
if (GRAPHICS_VER(xe) >= 20) {
|
||||
u64 ccs_size = tile_size / 512;
|
||||
u64 offset_hi, offset_lo;
|
||||
@@ -218,7 +223,10 @@ static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size)
|
||||
offset = (u64)REG_FIELD_GET(XEHP_FLAT_CCS_PTR, reg) * SZ_64K;
|
||||
}
|
||||
|
||||
return offset;
|
||||
xe_force_wake_put(gt_to_fw(gt), fw_ref);
|
||||
*poffset = offset;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -245,7 +253,6 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
|
||||
{
|
||||
struct xe_device *xe = tile_to_xe(tile);
|
||||
struct xe_gt *gt = tile->primary_gt;
|
||||
unsigned int fw_ref;
|
||||
u64 offset;
|
||||
u32 reg;
|
||||
|
||||
@@ -265,23 +272,22 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
|
||||
return 0;
|
||||
}
|
||||
|
||||
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
|
||||
if (!fw_ref)
|
||||
return -ETIMEDOUT;
|
||||
|
||||
/* actual size */
|
||||
if (unlikely(xe->info.platform == XE_DG1)) {
|
||||
*tile_size = pci_resource_len(to_pci_dev(xe->drm.dev), LMEM_BAR);
|
||||
*tile_offset = 0;
|
||||
} else {
|
||||
reg = xe_gt_mcr_unicast_read_any(gt, XEHP_TILE_ADDR_RANGE(gt->info.id));
|
||||
reg = xe_mmio_read32(&tile->mmio, SG_TILE_ADDR_RANGE(tile->id));
|
||||
*tile_size = (u64)REG_FIELD_GET(GENMASK(14, 8), reg) * SZ_1G;
|
||||
*tile_offset = (u64)REG_FIELD_GET(GENMASK(7, 1), reg) * SZ_1G;
|
||||
}
|
||||
|
||||
/* minus device usage */
|
||||
if (xe->info.has_flat_ccs) {
|
||||
offset = get_flat_ccs_offset(gt, *tile_size);
|
||||
int ret = get_flat_ccs_offset(gt, *tile_size, &offset);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
offset = xe_mmio_read64_2x32(&tile->mmio, GSMBASE);
|
||||
}
|
||||
@@ -289,8 +295,6 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
|
||||
/* remove the tile offset so we have just the available size */
|
||||
*vram_size = offset - *tile_offset;
|
||||
|
||||
xe_force_wake_put(gt_to_fw(gt), fw_ref);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -922,6 +922,11 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
|
||||
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3005), ENGINE_CLASS(RENDER)),
|
||||
XE_RTP_ACTIONS(SET(XEHP_SLICE_COMMON_ECO_CHICKEN1, FAST_CLEAR_VALIGN_FIX))
|
||||
},
|
||||
{ XE_RTP_NAME("15016589081"),
|
||||
XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0),
|
||||
ENGINE_CLASS(RENDER)),
|
||||
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
|
||||
},
|
||||
};
|
||||
|
||||
static __maybe_unused const struct xe_rtp_entry oob_was[] = {
|
||||
|
||||
@@ -861,7 +861,7 @@
|
||||
MACRO__(0xE216, ## __VA_ARGS__)
|
||||
|
||||
#define INTEL_BMG_IDS(MACRO__, ...) \
|
||||
INTEL_BMG_G21_IDS(MACRO__, __VA_ARGS__), \
|
||||
INTEL_BMG_G21_IDS(MACRO__, ## __VA_ARGS__), \
|
||||
MACRO__(0xE220, ## __VA_ARGS__), \
|
||||
MACRO__(0xE221, ## __VA_ARGS__), \
|
||||
MACRO__(0xE222, ## __VA_ARGS__), \
|
||||
|
||||