Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
ice: cleanups and preparation for live migration

Jacob Keller says:

Various cleanups and preparations to the ice driver code to support
SR-IOV live migration.

The logic for unpacking Rx queue context data is added. This is the inverse
of the existing packing logic. Thanks to <linux/packing.h> this is trivial
to add.
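
With <linux/packing.h>, packing and unpacking share a single field
table, so the inverse direction is a one-line mirror of the existing
call. A minimal sketch of the pair (same table, same quirks, opposite
direction; rlan_ctx points to a struct ice_rlan_ctx):

    ice_rxq_ctx_buf_t buf = {};

    /* CPU-friendly struct -> bit-packed HW buffer */
    pack_fields(&buf, sizeof(buf), rlan_ctx, ice_rlan_ctx_fields,
                QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);

    /* Bit-packed HW buffer -> CPU-friendly struct */
    unpack_fields(&buf, sizeof(buf), rlan_ctx, ice_rlan_ctx_fields,
                  QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);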

Code is added to read and write the Tx queue context for a queue over
the shared hardware register interface. Thanks to ice_adapter, access
is serialized across all PFs that need to use it, preventing
concurrency issues between multiple PFs.
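
Together these enable a full context round trip; a hypothetical
save/restore caller might look like:

    struct ice_tlan_ctx tlan_ctx = {};
    int err;

    /* Save: read and unpack the full 40-byte Tx queue context. Both
     * helpers serialize on adapter->txq_ctx_lock internally.
     */
    err = ice_read_txq_ctx(hw, &tlan_ctx, txq_index);
    if (err)
            return err;

    /* Restore: pack and write it back, including internal state */
    err = ice_write_txq_ctx(hw, &tlan_ctx, txq_index);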

The RSS hash configuration requested by a VF is cached within the VF
structure. This will be used to track and restore the same configuration
during migration load.
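
As a sketch of the intended restore path (the migration helper below
is hypothetical; ice_get_vf_vsi() and ice_add_avf_rss_cfg() are the
existing functions the virtchnl handler already uses):

    static int ice_migration_restore_rss_hashcfg(struct ice_vf *vf)
    {
            struct ice_vsi *vsi = ice_get_vf_vsi(vf);

            if (!vsi)
                    return -EINVAL;

            /* Replay the configuration cached in vf->rss_hashcfg */
            return ice_add_avf_rss_cfg(&vsi->back->hw, vsi,
                                       vf->rss_hashcfg);
    }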

ice_sriov_set_msix_vec_count() is updated to use pci_iov_vf_id()
instead of open-coding a worse equivalent, and now checks whether the
requested vector count matches the current one, avoiding an unnecessary
MSI-X rebuild.
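
For reference, pci_iov_vf_id() maps a VF's struct pci_dev directly
back to its zero-based VF ID, or returns a negative errno if the
device is not a VF, so the lookup reduces to:

    int vf_id = pci_iov_vf_id(vf_dev);

    if (vf_id < 0)
            return -ENOENT;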

A new ice_get_vf_by_dev() helper function is added to simplify accessing a
VF from its PCI device structure. This will be used more heavily within the
live migration code itself.
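
Callers follow the usual get/put reference pattern; a sketch based on
the updated ice_sriov_set_msix_vec_count():

    struct ice_vf *vf = ice_get_vf_by_dev(pf, vf_dev);

    if (!vf)
            return -ENOENT;

    /* ... operate on the VF ... */

    ice_put_vf(vf);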

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  ice: introduce ice_get_vf_by_dev() wrapper
  ice: avoid rebuilding if MSI-X vector count is unchanged
  ice: use pci_iov_vf_id() to get VF ID
  ice: expose VF functions used by live migration
  ice: move ice_vsi_update_l2tsel to ice_lib.c
  ice: save RSS hash configuration for migration
  ice: add functions to get and set Tx queue context
  ice: add support for reading and unpacking Rx queue context
====================

Link: https://patch.msgid.link/20250710214518.1824208-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

@@ -32,6 +32,7 @@ static struct ice_adapter *ice_adapter_new(u64 dsn)
adapter->device_serial_number = dsn;
spin_lock_init(&adapter->ptp_gltsyn_time_lock);
spin_lock_init(&adapter->txq_ctx_lock);
refcount_set(&adapter->refcount, 1);
mutex_init(&adapter->ports.lock);


@@ -27,9 +27,10 @@ struct ice_port_list {
/**
* struct ice_adapter - PCI adapter resources shared across PFs
* @refcount: Reference count. struct ice_pf objects hold the references.
* @ptp_gltsyn_time_lock: Spinlock protecting access to the GLTSYN_TIME
* register of the PTP clock.
* @refcount: Reference count. struct ice_pf objects hold the references.
* @txq_ctx_lock: Spinlock protecting access to the GLCOMM_QTX_CNTX_CTL register
* @ctrl_pf: Control PF of the adapter
* @ports: Ports list
* @device_serial_number: DSN cached for collision detection on 32bit systems
@@ -38,6 +39,8 @@ struct ice_adapter {
refcount_t refcount;
/* For access to the GLTSYN_TIME register */
spinlock_t ptp_gltsyn_time_lock;
/* For access to GLCOMM_QTX_CNTX_CTL register */
spinlock_t txq_ctx_lock;
struct ice_pf *ctrl_pf;
struct ice_port_list ports;


@@ -14,11 +14,23 @@
#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TXQ_CTX_SZ 22
typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t;
/* The Tx queue context is 40 bytes, and includes some internal state. The
* Admin Queue buffers don't include the internal state, so only include the
* first 22 bytes of the context.
*/
#define ICE_TXQ_CTX_SZ 22
typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t;
#define ICE_TXQ_CTX_FULL_SIZE_DWORDS 10
#define ICE_TXQ_CTX_FULL_SZ \
(ICE_TXQ_CTX_FULL_SIZE_DWORDS * sizeof(u32))
typedef struct __packed { u8 buf[ICE_TXQ_CTX_FULL_SZ]; } ice_txq_ctx_buf_full_t;
struct ice_aqc_generic {
__le32 param0;
__le32 param1;


@@ -1342,6 +1342,26 @@ static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw,
}
}
/**
* ice_copy_rxq_ctx_from_hw - Copy packed Rx Queue context from HW registers
* @hw: pointer to the hardware structure
* @rxq_ctx: pointer to the packed Rx queue context
* @rxq_index: the index of the Rx queue
*/
static void ice_copy_rxq_ctx_from_hw(struct ice_hw *hw,
ice_rxq_ctx_buf_t *rxq_ctx,
u32 rxq_index)
{
u32 *ctx = (u32 *)rxq_ctx;
/* Copy each dword separately from HW */
for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++, ctx++) {
*ctx = rd32(hw, QRX_CONTEXT(i, rxq_index));
ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
}
}
#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \
PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field)
@@ -1385,6 +1405,21 @@ static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}
/**
* ice_unpack_rxq_ctx - Unpack Rx queue context from a HW buffer
* @buf: the HW buffer to unpack from
* @ctx: the Rx queue context to unpack
*
* Unpack the Rx queue context from the HW buffer into the CPU-friendly
* structure.
*/
static void ice_unpack_rxq_ctx(const ice_rxq_ctx_buf_t *buf,
struct ice_rlan_ctx *ctx)
{
unpack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}
/**
* ice_write_rxq_ctx - Write Rx Queue context to hardware
* @hw: pointer to the hardware structure
@@ -1410,6 +1445,31 @@ int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
return 0;
}
/**
* ice_read_rxq_ctx - Read Rx queue context from HW
* @hw: pointer to the hardware structure
* @rlan_ctx: pointer to the Rx queue context
* @rxq_index: the index of the Rx queue
*
* Read the Rx queue context from the hardware registers, and unpack it into
* the sparse Rx queue context structure.
*
* Returns: 0 on success, or -EINVAL if the Rx queue index is invalid.
*/
int ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
ice_rxq_ctx_buf_t buf = {};
if (rxq_index > QRX_CTRL_MAX_INDEX)
return -EINVAL;
ice_copy_rxq_ctx_from_hw(hw, &buf, rxq_index);
ice_unpack_rxq_ctx(&buf, rlan_ctx);
return 0;
}
/* LAN Tx Queue Context */
static const struct packed_field_u8 ice_tlan_ctx_fields[] = {
/* Field Width LSB */
@@ -1443,12 +1503,12 @@ static const struct packed_field_u8 ice_tlan_ctx_fields[] = {
};
/**
* ice_pack_txq_ctx - Pack Tx queue context into a HW buffer
* ice_pack_txq_ctx - Pack Tx queue context into Admin Queue buffer
* @ctx: the Tx queue context to pack
* @buf: the HW buffer to pack into
* @buf: the Admin Queue HW buffer to pack into
*
* Pack the Tx queue context from the CPU-friendly unpacked buffer into its
* bit-packed HW layout.
* bit-packed Admin Queue layout.
*/
void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
{
@@ -1456,6 +1516,173 @@ void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}
/**
* ice_pack_txq_ctx_full - Pack Tx queue context into a HW buffer
* @ctx: the Tx queue context to pack
* @buf: the HW buffer to pack into
*
* Pack the Tx queue context from the CPU-friendly unpacked buffer into its
* bit-packed HW layout, including the internal data portion.
*/
static void ice_pack_txq_ctx_full(const struct ice_tlan_ctx *ctx,
ice_txq_ctx_buf_full_t *buf)
{
pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}
/**
* ice_unpack_txq_ctx_full - Unpack Tx queue context from a HW buffer
* @buf: the HW buffer to unpack from
* @ctx: the Tx queue context to unpack
*
* Unpack the Tx queue context from the HW buffer (including the full internal
* state) into the CPU-friendly structure.
*/
static void ice_unpack_txq_ctx_full(const ice_txq_ctx_buf_full_t *buf,
struct ice_tlan_ctx *ctx)
{
unpack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}
/**
* ice_copy_txq_ctx_from_hw - Copy Tx Queue context from HW registers
* @hw: pointer to the hardware structure
* @txq_ctx: pointer to the packed Tx queue context, including internal state
* @txq_index: the index of the Tx queue
*
* Copy Tx Queue context from HW register space to dense structure
*/
static void ice_copy_txq_ctx_from_hw(struct ice_hw *hw,
ice_txq_ctx_buf_full_t *txq_ctx,
u32 txq_index)
{
struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u32 *ctx = (u32 *)txq_ctx;
u32 txq_base, reg;
/* Get Tx queue base within card space */
txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id));
txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base);
reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M,
GLCOMM_QTX_CNTX_CTL_CMD_READ) |
FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M,
txq_base + txq_index) |
GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;
/* Prevent other PFs on the same adapter from accessing the Tx queue
* context interface concurrently.
*/
spin_lock(&pf->adapter->txq_ctx_lock);
wr32(hw, GLCOMM_QTX_CNTX_CTL, reg);
ice_flush(hw);
/* Copy each dword separately from HW */
for (int i = 0; i < ICE_TXQ_CTX_FULL_SIZE_DWORDS; i++, ctx++) {
*ctx = rd32(hw, GLCOMM_QTX_CNTX_DATA(i));
ice_debug(hw, ICE_DBG_QCTX, "qtxdata[%d]: %08X\n", i, *ctx);
}
spin_unlock(&pf->adapter->txq_ctx_lock);
}
/**
* ice_copy_txq_ctx_to_hw - Copy Tx Queue context into HW registers
* @hw: pointer to the hardware structure
* @txq_ctx: pointer to the packed Tx queue context, including internal state
* @txq_index: the index of the Tx queue
*/
static void ice_copy_txq_ctx_to_hw(struct ice_hw *hw,
const ice_txq_ctx_buf_full_t *txq_ctx,
u32 txq_index)
{
struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u32 txq_base, reg;
/* Get Tx queue base within card space */
txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id));
txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base);
reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M,
GLCOMM_QTX_CNTX_CTL_CMD_WRITE_NO_DYN) |
FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M,
txq_base + txq_index) |
GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;
/* Prevent other PFs on the same adapter from accessing the Tx queue
* context interface concurrently.
*/
spin_lock(&pf->adapter->txq_ctx_lock);
/* Copy each dword separately to HW */
for (int i = 0; i < ICE_TXQ_CTX_FULL_SIZE_DWORDS; i++) {
u32 ctx = ((const u32 *)txq_ctx)[i];
wr32(hw, GLCOMM_QTX_CNTX_DATA(i), ctx);
ice_debug(hw, ICE_DBG_QCTX, "qtxdata[%d]: %08X\n", i, ctx);
}
wr32(hw, GLCOMM_QTX_CNTX_CTL, reg);
ice_flush(hw);
spin_unlock(&pf->adapter->txq_ctx_lock);
}
/**
* ice_read_txq_ctx - Read Tx queue context from HW
* @hw: pointer to the hardware structure
* @tlan_ctx: pointer to the Tx queue context
* @txq_index: the index of the Tx queue
*
* Read the Tx queue context from the HW registers, then unpack it into the
* ice_tlan_ctx structure for use.
*
* Returns: 0 on success, or -EINVAL on an invalid Tx queue index.
*/
int ice_read_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
u32 txq_index)
{
ice_txq_ctx_buf_full_t buf = {};
if (txq_index > QTX_COMM_HEAD_MAX_INDEX)
return -EINVAL;
ice_copy_txq_ctx_from_hw(hw, &buf, txq_index);
ice_unpack_txq_ctx_full(&buf, tlan_ctx);
return 0;
}
/**
* ice_write_txq_ctx - Write Tx queue context to HW
* @hw: pointer to the hardware structure
* @tlan_ctx: pointer to the Tx queue context
* @txq_index: the index of the Tx queue
*
* Pack the Tx queue context into the dense HW layout, then write it into the
* HW registers.
*
* Returns: 0 on success, or -EINVAL on an invalid Tx queue index.
*/
int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
u32 txq_index)
{
ice_txq_ctx_buf_full_t buf = {};
if (txq_index > QTX_COMM_HEAD_MAX_INDEX)
return -EINVAL;
ice_pack_txq_ctx_full(tlan_ctx, &buf);
ice_copy_txq_ctx_to_hw(hw, &buf, txq_index);
return 0;
}
/* Sideband Queue command wrappers */
/**


@@ -118,6 +118,12 @@ void ice_set_safe_mode_caps(struct ice_hw *hw);
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
int ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
int ice_read_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
u32 txq_index);
int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
u32 txq_index);
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);


@@ -16,6 +16,7 @@
#define GLCOMM_QUANTA_PROF_MAX_DESC_M ICE_M(0x3F, 24)
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD_MAX_INDEX 16383
#define QTX_COMM_HEAD_HEAD_S 0
#define QTX_COMM_HEAD_HEAD_M ICE_M(0x1FFF, 0)
#define PF_FW_ARQBAH 0x00080180
@@ -272,6 +273,8 @@
#define VPINT_ALLOC_PCI_VALID_M BIT(31)
#define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4))
#define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30)
#define PFLAN_TX_QALLOC(_PF) (0x001D2580 + ((_PF) * 4))
#define PFLAN_TX_QALLOC_FIRSTQ_M GENMASK(13, 0)
#define GLLAN_RCTL_0 0x002941F8
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
@@ -376,6 +379,15 @@
#define GLNVM_ULD_POR_DONE_1_M BIT(8)
#define GLNVM_ULD_PCIER_DONE_2_M BIT(9)
#define GLNVM_ULD_PE_DONE_M BIT(10)
#define GLCOMM_QTX_CNTX_CTL 0x002D2DC8
#define GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M GENMASK(13, 0)
#define GLCOMM_QTX_CNTX_CTL_CMD_M GENMASK(18, 16)
#define GLCOMM_QTX_CNTX_CTL_CMD_READ 0
#define GLCOMM_QTX_CNTX_CTL_CMD_WRITE 1
#define GLCOMM_QTX_CNTX_CTL_CMD_RESET 3
#define GLCOMM_QTX_CNTX_CTL_CMD_WRITE_NO_DYN 4
#define GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M BIT(19)
#define GLCOMM_QTX_CNTX_DATA(_i) (0x002D2D40 + ((_i) * 4))
#define GLPCI_CNF2 0x000BE004
#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
#define PF_FUNC_RID 0x0009E880


@@ -4020,3 +4020,38 @@ ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
vsi->info = ctx.info;
return 0;
}
/**
* ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
* @vsi: VSI used to update l2tsel on
* @l2tsel: l2tsel setting requested
*
* Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
* This will modify which descriptor field the first offloaded VLAN will be
* stripped into.
*/
void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
{
struct ice_hw *hw = &vsi->back->hw;
u32 l2tsel_bit;
int i;
if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
l2tsel_bit = 0;
else
l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
for (i = 0; i < vsi->alloc_rxq; i++) {
u16 pfq = vsi->rxq_map[i];
u32 qrx_context_offset;
u32 regval;
qrx_context_offset =
QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
regval = rd32(hw, qrx_context_offset);
regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
regval |= l2tsel_bit;
wr32(hw, qrx_context_offset, regval);
}
}


@@ -11,6 +11,13 @@
#define ICE_VSI_FLAG_INIT BIT(0)
#define ICE_VSI_FLAG_NO_INIT 0
#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
#define ICE_L2TSEL_BIT_OFFSET 23
enum ice_l2tsel {
ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
};
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
bool ice_pf_state_is_nominal(struct ice_pf *pf);
@@ -116,4 +123,5 @@ void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_init_feature_support(struct ice_pf *pf);
bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi);
void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel);
#endif /* !_ICE_LIB_H_ */


@@ -933,7 +933,6 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
bool needs_rebuild = false;
struct ice_vsi *vsi;
struct ice_vf *vf;
int id;
if (!ice_get_num_vfs(pf))
return -ENOENT;
@@ -952,17 +951,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (msix_vec_count < ICE_MIN_INTR_PER_VF)
return -EINVAL;
/* Transition of PCI VF function number to function_id */
for (id = 0; id < pci_num_vf(pdev); id++) {
if (vf_dev->devfn == pci_iov_virtfn_devfn(pdev, id))
break;
}
if (id == pci_num_vf(pdev))
return -ENOENT;
vf = ice_get_vf_by_id(pf, id);
vf = ice_get_vf_by_dev(pf, vf_dev);
if (!vf)
return -ENOENT;
@@ -972,6 +961,12 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
return -ENOENT;
}
/* No need to rebuild if we're setting to the same value */
if (msix_vec_count == vf->num_msix) {
ice_put_vf(vf);
return 0;
}
prev_msix = vf->num_msix;
prev_queues = vf->num_vf_qs;


@@ -64,6 +64,7 @@ bool
ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev);
int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count);
int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id);
#else /* CONFIG_PCI_IOV */
static inline void ice_process_vflr_event(struct ice_pf *pf) { }
static inline void ice_free_vfs(struct ice_pf *pf) { }
@@ -164,5 +165,11 @@ ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
{
return -EOPNOTSUPP;
}
static inline int ice_vf_vsi_dis_single_txq(struct ice_vf *vf,
struct ice_vsi *vsi, u16 q_id)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_SRIOV_H_ */


@@ -1022,6 +1022,9 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
vf->num_msix = vfs->num_msix_per;
vf->num_vf_qs = vfs->num_qps_per;
/* set default RSS hash configuration */
vf->rss_hashcfg = ICE_DEFAULT_RSS_HASHCFG;
/* ctrl_vsi_idx will be set to a valid value only when iAVF
* creates its first fdir rule.
*/


@@ -106,8 +106,7 @@ struct ice_vf {
u16 ctrl_vsi_idx;
struct ice_vf_fdir fdir;
struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
/* first vector index of this VF in the PF space */
int first_vector_idx;
u64 rss_hashcfg; /* RSS hash configuration */
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
@@ -126,10 +125,14 @@ struct ice_vf {
u8 link_up:1; /* only valid if VF link is forced */
u8 lldp_tx_ena:1;
u16 num_msix; /* num of MSI-X configured on this VF */
u32 ptp_caps;
unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
/* first vector index of this VF in the PF space */
int first_vector_idx;
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
unsigned long vf_caps; /* VF's adv. capabilities */
@@ -154,7 +157,6 @@ struct ice_vf {
u16 lldp_recipe_id;
u16 lldp_rule_id;
u16 num_msix; /* num of MSI-X configured on this VF */
struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF];
};
@@ -237,6 +239,18 @@ static inline bool ice_vf_is_lldp_ena(struct ice_vf *vf)
#ifdef CONFIG_PCI_IOV
struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id);
static inline struct ice_vf *ice_get_vf_by_dev(struct ice_pf *pf,
struct pci_dev *vf_dev)
{
int vf_id = pci_iov_vf_id(vf_dev);
if (vf_id < 0)
return NULL;
return ice_get_vf_by_id(pf, vf_id);
}
void ice_put_vf(struct ice_vf *vf);
bool ice_has_vfs(struct ice_pf *pf);
u16 ice_get_num_vfs(struct ice_pf *pf);
@@ -263,6 +277,12 @@ static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
return NULL;
}
static inline struct ice_vf *ice_get_vf_by_dev(struct ice_pf *pf,
struct pci_dev *vf_dev)
{
return NULL;
}
static inline void ice_put_vf(struct ice_vf *vf)
{
}


@@ -1427,7 +1427,7 @@ static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
* @vsi: VSI of the VF to configure
* @q_idx: VF queue index used to determine the queue in the PF's space
*/
static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
{
struct ice_hw *hw = &vsi->back->hw;
u32 pfq = vsi->txq_map[q_idx];
@@ -1450,7 +1450,7 @@ static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
* @vsi: VSI of the VF to configure
* @q_idx: VF queue index used to determine the queue in the PF's space
*/
static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
{
struct ice_hw *hw = &vsi->back->hw;
u32 pfq = vsi->rxq_map[q_idx];
@@ -1566,8 +1566,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
* disabled then clear q_id bit in the enabled queues bitmap and return
* success. Otherwise return error.
*/
static int
ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
{
struct ice_txq_meta txq_meta = { 0 };
struct ice_tx_ring *ring;
@@ -2621,7 +2620,7 @@ static bool ice_vf_vlan_offload_ena(u32 caps)
* ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
* @vf: VF used to determine if VLAN promiscuous config is allowed
*/
static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
{
if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
@@ -2640,8 +2639,8 @@ static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
* This function should only be called if VLAN promiscuous mode is allowed,
* which can be determined via ice_is_vlan_promisc_allowed().
*/
static int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
struct ice_vlan *vlan)
int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
struct ice_vlan *vlan)
{
u8 promisc_m = 0;
int status;
@@ -3094,6 +3093,10 @@ static int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg)
v_ret = ice_err_to_virt_err(status);
}
/* save the requested VF configuration */
if (!v_ret)
vf->rss_hashcfg = vrh->hashcfg;
/* send the response to the VF */
err:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, v_ret,
@@ -3856,48 +3859,6 @@ ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
return 0;
}
#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
#define ICE_L2TSEL_BIT_OFFSET 23
enum ice_l2tsel {
ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
};
/**
* ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
* @vsi: VSI used to update l2tsel on
* @l2tsel: l2tsel setting requested
*
* Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
* This will modify which descriptor field the first offloaded VLAN will be
* stripped into.
*/
static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
{
struct ice_hw *hw = &vsi->back->hw;
u32 l2tsel_bit;
int i;
if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
l2tsel_bit = 0;
else
l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
for (i = 0; i < vsi->alloc_rxq; i++) {
u16 pfq = vsi->rxq_map[i];
u32 qrx_context_offset;
u32 regval;
qrx_context_offset =
QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
regval = rd32(hw, qrx_context_offset);
regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
regval |= l2tsel_bit;
wr32(hw, qrx_context_offset, regval);
}
}
/**
* ice_vc_ena_vlan_stripping_v2_msg
* @vf: VF the message was received from


@@ -92,12 +92,31 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
struct ice_mbx_data *mbxdata);
void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx);
void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx);
int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
struct ice_vlan *vlan);
bool ice_is_vlan_promisc_allowed(struct ice_vf *vf);
#else /* CONFIG_PCI_IOV */
static inline void ice_virtchnl_set_dflt_ops(struct ice_vf *vf) { }
static inline void ice_virtchnl_set_repr_ops(struct ice_vf *vf) { }
static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
static inline void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx) { }
static inline void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx) { }
static inline int ice_vf_ena_vlan_promisc(struct ice_vf *vf,
struct ice_vsi *vsi,
struct ice_vlan *vlan)
{
return -EOPNOTSUPP;
}
static inline bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
{
return false;
}
static inline int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,