Merge branch '200GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2025-07-18 (idpf, ice, igc, igbvf, ixgbevf)

For idpf:
Ahmed and Sudheer add support for flow steering via ntuple filters.
Current support is for IPv4 and TCP/UDP only.

Milena adds support for cross timestamping.

Ahmed preserves coalesce settings across resets.

For ice:
Alex adds reporting of 40GbE speed in devlink port split.

Dawid adds support for E835 devices.

Jesse refactors profile ptype processing for cleaner, more readable
code.

Dave adds a couple of helper functions for LAG to reduce code
duplication.

For igc:
Siang adds support to configure "Default Queue" during runtime using
ethtool's Network Flow Classification (NFC) wildcard rule approach.

For igbvf:
Yuto Ohnuki removes unused fields from igbvf_adapter.

For ixgbevf:
Yuto Ohnuki removes unused fields from ixgbevf_adapter.

* '200GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  ixgbevf: remove unused fields from struct ixgbevf_adapter
  igbvf: remove unused fields from struct igbvf_adapter
  igc: Add wildcard rule support to ethtool NFC using Default Queue
  igc: Relocate RSS field definitions to igc_defines.h
  ice: breakout common LAG code into helpers
  ice: convert ice_add_prof() to bitmap
  ice: add E835 device IDs
  ice: add 40G speed to Admin Command GET PORT OPTION
  idpf: preserve coalescing settings across resets
  idpf: add cross timestamping
  idpf: add flow steering support
  virtchnl2: add flow steering support
  virtchnl2: rename enum virtchnl2_cap_rss
====================

Link: https://patch.msgid.link/20250718185118.2042772-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2025-07-21 17:18:33 -07:00
31 changed files with 1122 additions and 195 deletions

View File

@@ -30,6 +30,8 @@ static const char *ice_devlink_port_opt_speed_str(u8 speed)
return "10";
case ICE_AQC_PORT_OPT_MAX_LANE_25G:
return "25";
case ICE_AQC_PORT_OPT_MAX_LANE_40G:
return "40";
case ICE_AQC_PORT_OPT_MAX_LANE_50G:
return "50";
case ICE_AQC_PORT_OPT_MAX_LANE_100G:

View File

@@ -1684,6 +1684,7 @@ struct ice_aqc_get_port_options_elem {
#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8
#define ICE_AQC_PORT_OPT_MAX_LANE_40G 9
u8 global_scid[2];
u8 phy_scid[2];

View File

@@ -171,6 +171,15 @@ static int ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E830_XXV_QSFP:
case ICE_DEV_ID_E830C_SFP:
case ICE_DEV_ID_E830_XXV_SFP:
case ICE_DEV_ID_E835CC_BACKPLANE:
case ICE_DEV_ID_E835CC_QSFP56:
case ICE_DEV_ID_E835CC_SFP:
case ICE_DEV_ID_E835C_BACKPLANE:
case ICE_DEV_ID_E835C_QSFP:
case ICE_DEV_ID_E835C_SFP:
case ICE_DEV_ID_E835_L_BACKPLANE:
case ICE_DEV_ID_E835_L_QSFP:
case ICE_DEV_ID_E835_L_SFP:
hw->mac_type = ICE_MAC_E830;
break;
default:
@@ -4307,7 +4316,7 @@ int ice_get_phy_lane_number(struct ice_hw *hw)
speed = options[active_idx].max_lane_speed;
/* If we don't get speed for this lane, it's unoccupied */
if (speed > ICE_AQC_PORT_OPT_MAX_LANE_200G)
if (speed > ICE_AQC_PORT_OPT_MAX_LANE_40G)
continue;
if (hw->pf_id == lport) {

View File

@@ -6,6 +6,24 @@
/* Device IDs */
#define ICE_DEV_ID_E822_SI_DFLT 0x1888
/* Intel(R) Ethernet Controller E835-CC for backplane */
#define ICE_DEV_ID_E835CC_BACKPLANE 0x1248
/* Intel(R) Ethernet Controller E835-CC for QSFP */
#define ICE_DEV_ID_E835CC_QSFP56 0x1249
/* Intel(R) Ethernet Controller E835-CC for SFP */
#define ICE_DEV_ID_E835CC_SFP 0x124A
/* Intel(R) Ethernet Controller E835-C for backplane */
#define ICE_DEV_ID_E835C_BACKPLANE 0x1261
/* Intel(R) Ethernet Controller E835-C for QSFP */
#define ICE_DEV_ID_E835C_QSFP 0x1262
/* Intel(R) Ethernet Controller E835-C for SFP */
#define ICE_DEV_ID_E835C_SFP 0x1263
/* Intel(R) Ethernet Controller E835-L for backplane */
#define ICE_DEV_ID_E835_L_BACKPLANE 0x1265
/* Intel(R) Ethernet Controller E835-L for QSFP */
#define ICE_DEV_ID_E835_L_QSFP 0x1266
/* Intel(R) Ethernet Controller E835-L for SFP */
#define ICE_DEV_ID_E835_L_SFP 0x1267
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */

View File

@@ -667,7 +667,8 @@ static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
port_topology->serdes_lane_count = 4;
else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G)
else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G ||
max_speed == ICE_AQC_PORT_OPT_MAX_LANE_40G)
port_topology->serdes_lane_count = 2;
else
port_topology->serdes_lane_count = 1;

View File

@@ -3043,16 +3043,16 @@ ice_disable_fd_swap(struct ice_hw *hw, u8 prof_id)
* the ID value used here.
*/
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap)
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
unsigned long *ptypes, const struct ice_ptype_attributes *attr,
u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool symm,
bool fd_swap)
{
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
u8 byte = 0;
u8 prof_id;
int status;
u8 prof_id;
u16 ptype;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -3102,57 +3102,35 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
prof->context = 0;
/* build list of ptgs */
while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
u8 bit;
for_each_set_bit(ptype, ptypes, ICE_FLOW_PTYPE_MAX) {
u8 ptg;
if (!ptypes[byte]) {
bytes--;
byte++;
/* The package should place all ptypes in a non-zero
* PTG, so the following call should never fail.
*/
if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
continue;
}
/* Examine 8 bits per byte */
for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
BITS_PER_BYTE) {
u16 ptype;
u8 ptg;
/* If PTG is already added, skip and continue */
if (test_bit(ptg, ptgs_used))
continue;
ptype = byte * BITS_PER_BYTE + bit;
set_bit(ptg, ptgs_used);
/* Check to see there are any attributes for this ptype, and
* add them if found.
*/
status = ice_add_prof_attrib(prof, ptg, ptype, attr, attr_cnt);
if (status == -ENOSPC)
break;
if (status) {
/* This is simple a ptype/PTG with no attribute */
prof->ptg[prof->ptg_cnt] = ptg;
prof->attr[prof->ptg_cnt].flags = 0;
prof->attr[prof->ptg_cnt].mask = 0;
/* The package should place all ptypes in a non-zero
* PTG, so the following call should never fail.
*/
if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
continue;
/* If PTG is already added, skip and continue */
if (test_bit(ptg, ptgs_used))
continue;
__set_bit(ptg, ptgs_used);
/* Check to see there are any attributes for
* this PTYPE, and add them if found.
*/
status = ice_add_prof_attrib(prof, ptg, ptype,
attr, attr_cnt);
if (status == -ENOSPC)
if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
break;
if (status) {
/* This is simple a PTYPE/PTG with no
* attribute
*/
prof->ptg[prof->ptg_cnt] = ptg;
prof->attr[prof->ptg_cnt].flags = 0;
prof->attr[prof->ptg_cnt].mask = 0;
if (++prof->ptg_cnt >=
ICE_MAX_PTG_PER_PROFILE)
break;
}
}
bytes--;
byte++;
}
list_add(&prof->list, &hw->blk[blk].es.prof_map);

View File

@@ -39,9 +39,10 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
/* XLT2/VSI group functions */
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap);
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
unsigned long *ptypes, const struct ice_ptype_attributes *attr,
u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool symm,
bool fd_swap);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
int

View File

@@ -1421,7 +1421,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
}
/* Add a HW profile for this flow profile */
status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
status = ice_add_prof(hw, blk, prof_id, params->ptypes,
params->attr, params->attr_cnt, params->es,
params->mask, symm, true);
if (status) {
@@ -1617,7 +1617,7 @@ ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
break;
}
status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes,
status = ice_add_prof(hw, blk, id, prof->ptypes,
params->attr, params->attr_cnt,
params->es, params->mask, false, false);
if (status)

View File

@@ -822,6 +822,48 @@ ice_lag_cfg_cp_fltr(struct ice_lag *lag, bool add)
kfree(s_rule);
}
/**
 * ice_lag_prepare_vf_reset - helper to adjust vf lag for reset
 * @lag: lag struct for interface that owns VF
 *
 * Context: must be called with the lag_mutex lock held.
 *
 * Return: active lport value or ICE_LAG_INVALID_PORT if nothing moved.
 */
u8 ice_lag_prepare_vf_reset(struct ice_lag *lag)
{
	u8 act_prt, pri_prt;

	/* Only a bonded primary interface with an upper netdev has VF
	 * scheduler nodes that may need to be moved before reset.
	 */
	if (!lag || !lag->bonded || !lag->primary || !lag->upper_netdev)
		return ICE_LAG_INVALID_PORT;

	act_prt = lag->active_port;
	pri_prt = lag->pf->hw.port_info->lport;

	/* Nothing to move when the primary port is already the active one
	 * or when no port is currently active.
	 */
	if (act_prt == pri_prt || act_prt == ICE_LAG_INVALID_PORT)
		return ICE_LAG_INVALID_PORT;

	ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
	return act_prt;
}
/**
 * ice_lag_complete_vf_reset - helper for lag after reset
 * @lag: lag struct for primary interface
 * @act_prt: which port should be active for lag
 *
 * Context: must be called while holding the lag_mutex.
 */
void ice_lag_complete_vf_reset(struct ice_lag *lag, u8 act_prt)
{
	/* Undo the pre-reset move only if ice_lag_prepare_vf_reset()
	 * actually relocated the VF nodes (act_prt is a real port).
	 */
	if (!lag || !lag->bonded || !lag->primary ||
	    act_prt == ICE_LAG_INVALID_PORT)
		return;

	ice_lag_move_vf_nodes_cfg(lag, lag->pf->hw.port_info->lport, act_prt);
}
/**
* ice_lag_info_event - handle NETDEV_BONDING_INFO event
* @lag: LAG info struct

View File

@@ -70,4 +70,6 @@ void ice_deinit_lag(struct ice_pf *pf);
void ice_lag_rebuild(struct ice_pf *pf);
bool ice_lag_is_switchdev_running(struct ice_pf *pf);
void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
u8 ice_lag_prepare_vf_reset(struct ice_lag *lag);
void ice_lag_complete_vf_reset(struct ice_lag *lag, u8 act_prt);
#endif /* _ICE_LAG_H_ */

View File

@@ -5897,6 +5897,15 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_BACKPLANE), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_QSFP56), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_SFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_BACKPLANE), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_QSFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_SFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_BACKPLANE), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_QSFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_SFP), },
/* required last entry */
{}
};

View File

@@ -859,16 +859,13 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
struct ice_pf *pf = vf->pf;
struct ice_lag *lag;
struct ice_vsi *vsi;
u8 act_prt, pri_prt;
struct device *dev;
int err = 0;
u8 act_prt;
bool rsd;
dev = ice_pf_to_dev(pf);
act_prt = ICE_LAG_INVALID_PORT;
pri_prt = pf->hw.port_info->lport;
if (flags & ICE_VF_RESET_NOTIFY)
ice_notify_vf_reset(vf);
@@ -884,16 +881,8 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
else
lockdep_assert_held(&vf->cfg_lock);
lag = pf->lag;
mutex_lock(&pf->lag_mutex);
if (lag && lag->bonded && lag->primary) {
act_prt = lag->active_port;
if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
lag->upper_netdev)
ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
else
act_prt = ICE_LAG_INVALID_PORT;
}
act_prt = ice_lag_prepare_vf_reset(pf->lag);
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
@@ -979,9 +968,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_reset_vf_mbx_cnt(vf);
out_unlock:
if (lag && lag->bonded && lag->primary &&
act_prt != ICE_LAG_INVALID_PORT)
ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
ice_lag_complete_vf_reset(pf->lag, act_prt);
mutex_unlock(&pf->lag_mutex);
if (flags & ICE_VF_RESET_LOCK)

View File

@@ -1996,24 +1996,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
struct ice_pf *pf = vf->pf;
struct ice_lag *lag;
struct ice_vsi *vsi;
u8 act_prt, pri_prt;
int i = -1, q_idx;
bool ena_ts;
u8 act_prt;
lag = pf->lag;
mutex_lock(&pf->lag_mutex);
act_prt = ICE_LAG_INVALID_PORT;
pri_prt = pf->hw.port_info->lport;
if (lag && lag->bonded && lag->primary) {
act_prt = lag->active_port;
if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
lag->upper_netdev)
ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
else
act_prt = ICE_LAG_INVALID_PORT;
}
act_prt = ice_lag_prepare_vf_reset(pf->lag);
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
goto error_param;
@@ -2141,9 +2130,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
}
if (lag && lag->bonded && lag->primary &&
act_prt != ICE_LAG_INVALID_PORT)
ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
ice_lag_complete_vf_reset(pf->lag, act_prt);
mutex_unlock(&pf->lag_mutex);
/* send the response to the VF */
@@ -2160,9 +2147,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vf->vf_id, i);
}
if (lag && lag->bonded && lag->primary &&
act_prt != ICE_LAG_INVALID_PORT)
ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
ice_lag_complete_vf_reset(pf->lag, act_prt);
mutex_unlock(&pf->lag_mutex);
ice_lag_move_new_vf_nodes(vf);

View File

@@ -269,6 +269,12 @@ struct idpf_port_stats {
struct virtchnl2_vport_stats vport_stats;
};
struct idpf_fsteer_fltr {
struct list_head list;
u32 loc;
u32 q_index;
};
/**
* struct idpf_vport - Handle for netdevices and queue resources
* @num_txq: Number of allocated TX queues
@@ -399,10 +405,28 @@ struct idpf_rss_data {
u32 *cached_lut;
};
/**
* struct idpf_q_coalesce - User defined coalescing configuration values for
* a single queue.
* @tx_intr_mode: Dynamic TX ITR or not
* @rx_intr_mode: Dynamic RX ITR or not
* @tx_coalesce_usecs: TX interrupt throttling rate
* @rx_coalesce_usecs: RX interrupt throttling rate
*
* Used to restore user coalescing configuration after a reset.
*/
struct idpf_q_coalesce {
u32 tx_intr_mode;
u32 rx_intr_mode;
u32 tx_coalesce_usecs;
u32 rx_coalesce_usecs;
};
/**
* struct idpf_vport_user_config_data - User defined configuration values for
* each vport.
* @rss_data: See struct idpf_rss_data
* @q_coalesce: Array of per queue coalescing data
* @num_req_tx_qs: Number of user requested TX queues through ethtool
* @num_req_rx_qs: Number of user requested RX queues through ethtool
* @num_req_txq_desc: Number of user requested TX queue descriptors through
@@ -411,17 +435,22 @@ struct idpf_rss_data {
* ethtool
* @user_flags: User toggled config flags
* @mac_filter_list: List of MAC filters
* @num_fsteer_fltrs: number of flow steering filters
* @flow_steer_list: list of flow steering filters
*
* Used to restore configuration after a reset as the vport will get wiped.
*/
struct idpf_vport_user_config_data {
struct idpf_rss_data rss_data;
struct idpf_q_coalesce *q_coalesce;
u16 num_req_tx_qs;
u16 num_req_rx_qs;
u32 num_req_txq_desc;
u32 num_req_rxq_desc;
DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS);
struct list_head mac_filter_list;
u32 num_fsteer_fltrs;
struct list_head flow_steer_list;
};
/**
@@ -667,16 +696,16 @@ static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter)
}
#define IDPF_CAP_RSS (\
VIRTCHNL2_CAP_RSS_IPV4_TCP |\
VIRTCHNL2_CAP_RSS_IPV4_TCP |\
VIRTCHNL2_CAP_RSS_IPV4_UDP |\
VIRTCHNL2_CAP_RSS_IPV4_SCTP |\
VIRTCHNL2_CAP_RSS_IPV4_OTHER |\
VIRTCHNL2_CAP_RSS_IPV6_TCP |\
VIRTCHNL2_CAP_RSS_IPV6_TCP |\
VIRTCHNL2_CAP_RSS_IPV6_UDP |\
VIRTCHNL2_CAP_RSS_IPV6_SCTP |\
VIRTCHNL2_CAP_RSS_IPV6_OTHER)
VIRTCHNL2_FLOW_IPV4_TCP |\
VIRTCHNL2_FLOW_IPV4_TCP |\
VIRTCHNL2_FLOW_IPV4_UDP |\
VIRTCHNL2_FLOW_IPV4_SCTP |\
VIRTCHNL2_FLOW_IPV4_OTHER |\
VIRTCHNL2_FLOW_IPV6_TCP |\
VIRTCHNL2_FLOW_IPV6_TCP |\
VIRTCHNL2_FLOW_IPV6_UDP |\
VIRTCHNL2_FLOW_IPV6_SCTP |\
VIRTCHNL2_FLOW_IPV6_OTHER)
#define IDPF_CAP_RSC (\
VIRTCHNL2_CAP_RSC_IPV4_TCP |\
@@ -960,4 +989,7 @@ void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info);
void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info,
enum iidc_rdma_event_type event_type);
int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
struct virtchnl2_flow_rule_add_del *rule,
enum virtchnl2_op opcode);
#endif /* !_IDPF_H_ */

View File

@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_ptp.h"
#include "idpf_virtchnl.h"
/**
* idpf_get_rxnfc - command to get RX flow classification rules
@@ -13,26 +14,312 @@
* Returns Success if the command is supported.
*/
static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
u32 __always_unused *rule_locs)
u32 *rule_locs)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_user_config_data *user_config;
struct idpf_fsteer_fltr *f;
struct idpf_vport *vport;
unsigned int cnt = 0;
int err = 0;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = vport->num_rxq;
idpf_vport_ctrl_unlock(netdev);
return 0;
break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = user_config->num_fsteer_fltrs;
cmd->data = idpf_fsteer_max_rules(vport);
break;
case ETHTOOL_GRXCLSRULE:
err = -EINVAL;
list_for_each_entry(f, &user_config->flow_steer_list, list)
if (f->loc == cmd->fs.location) {
cmd->fs.ring_cookie = f->q_index;
err = 0;
break;
}
break;
case ETHTOOL_GRXCLSRLALL:
cmd->data = idpf_fsteer_max_rules(vport);
list_for_each_entry(f, &user_config->flow_steer_list, list) {
if (cnt == cmd->rule_cnt) {
err = -EMSGSIZE;
break;
}
rule_locs[cnt] = f->loc;
cnt++;
}
if (!err)
cmd->rule_cnt = user_config->num_fsteer_fltrs;
break;
default:
break;
}
idpf_vport_ctrl_unlock(netdev);
return -EOPNOTSUPP;
return err;
}
/**
 * idpf_fsteer_fill_ipv4 - fill IPv4 protocol header for a flow rule
 * @hdrs: virtchnl protocol headers to populate (layer-3 slot 0)
 * @fsp: ethtool flow spec supplying the IPv4 address match values/masks
 *
 * The src/dst fields share one layout across TCP/UDP v4 specs, so the
 * tcp_ip4_spec member is used for both flow types.
 */
static void idpf_fsteer_fill_ipv4(struct virtchnl2_proto_hdrs *hdrs,
				  struct ethtool_rx_flow_spec *fsp)
{
	struct iphdr *iph;

	hdrs->proto_hdr[0].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_IPV4);

	/* buffer_spec carries the values to match on */
	iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_spec;
	iph->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
	iph->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;

	/* buffer_mask selects which bits of those values are significant */
	iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_mask;
	iph->saddr = fsp->m_u.tcp_ip4_spec.ip4src;
	iph->daddr = fsp->m_u.tcp_ip4_spec.ip4dst;
}
/**
 * idpf_fsteer_fill_udp - fill UDP protocol header for a flow rule
 * @hdrs: virtchnl protocol headers to populate (layer-4 slot 1)
 * @fsp: ethtool flow spec supplying the port match values/masks
 * @v4: true to read the IPv4 spec union member, false for IPv6
 *
 * Port values arrive from ethtool already in network byte order and are
 * copied through unchanged.
 */
static void idpf_fsteer_fill_udp(struct virtchnl2_proto_hdrs *hdrs,
				 struct ethtool_rx_flow_spec *fsp,
				 bool v4)
{
	struct udphdr *udph, *udpm;

	hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_UDP);
	udph = (struct udphdr *)hdrs->proto_hdr[1].buffer_spec;
	udpm = (struct udphdr *)hdrs->proto_hdr[1].buffer_mask;

	if (v4) {
		udph->source = fsp->h_u.udp_ip4_spec.psrc;
		udph->dest = fsp->h_u.udp_ip4_spec.pdst;
		udpm->source = fsp->m_u.udp_ip4_spec.psrc;
		udpm->dest = fsp->m_u.udp_ip4_spec.pdst;
	} else {
		udph->source = fsp->h_u.udp_ip6_spec.psrc;
		udph->dest = fsp->h_u.udp_ip6_spec.pdst;
		udpm->source = fsp->m_u.udp_ip6_spec.psrc;
		udpm->dest = fsp->m_u.udp_ip6_spec.pdst;
	}
}
/**
 * idpf_fsteer_fill_tcp - fill TCP protocol header for a flow rule
 * @hdrs: virtchnl protocol headers to populate (layer-4 slot 1)
 * @fsp: ethtool flow spec supplying the port match values/masks
 * @v4: true to read the IPv4 spec union member, false for IPv6
 *
 * Mirrors idpf_fsteer_fill_udp() for the TCP header type.
 */
static void idpf_fsteer_fill_tcp(struct virtchnl2_proto_hdrs *hdrs,
				 struct ethtool_rx_flow_spec *fsp,
				 bool v4)
{
	struct tcphdr *tcph, *tcpm;

	hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_TCP);
	tcph = (struct tcphdr *)hdrs->proto_hdr[1].buffer_spec;
	tcpm = (struct tcphdr *)hdrs->proto_hdr[1].buffer_mask;

	if (v4) {
		tcph->source = fsp->h_u.tcp_ip4_spec.psrc;
		tcph->dest = fsp->h_u.tcp_ip4_spec.pdst;
		tcpm->source = fsp->m_u.tcp_ip4_spec.psrc;
		tcpm->dest = fsp->m_u.tcp_ip4_spec.pdst;
	} else {
		tcph->source = fsp->h_u.tcp_ip6_spec.psrc;
		tcph->dest = fsp->h_u.tcp_ip6_spec.pdst;
		tcpm->source = fsp->m_u.tcp_ip6_spec.psrc;
		tcpm->dest = fsp->m_u.tcp_ip6_spec.pdst;
	}
}
/**
 * idpf_add_flow_steer - add a Flow Steering filter
 * @netdev: network interface device structure
 * @cmd: command to add Flow Steering filter
 *
 * Builds a virtchnl2 flow rule from the ethtool spec, submits it to the
 * control plane, and on success mirrors it into the driver's sorted
 * flow_steer_list so it survives resets and can be reported via GRXCLSRULE.
 *
 * Context: caller holds the vport control lock.
 *
 * Return: 0 on success and negative values for failure
 */
static int idpf_add_flow_steer(struct net_device *netdev,
			       struct ethtool_rxnfc *cmd)
{
	struct idpf_fsteer_fltr *fltr, *parent = NULL, *f;
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;
	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
	struct virtchnl2_flow_rule_add_del *rule;
	struct idpf_vport_config *vport_config;
	struct virtchnl2_rule_action_set *acts;
	struct virtchnl2_flow_rule_info *info;
	struct virtchnl2_proto_hdrs *hdrs;
	struct idpf_vport *vport;
	u32 flow_type, q_index;
	u16 num_rxq;
	int err;

	vport = idpf_netdev_to_vport(netdev);
	vport_config = vport->adapter->vport_config[np->vport_idx];
	user_config = &vport_config->user_config;
	num_rxq = user_config->num_req_rx_qs;

	/* Reject any extension flags (FLOW_EXT etc.) - only the plain
	 * flow type is supported.
	 */
	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	if (flow_type != fsp->flow_type)
		return -EINVAL;

	if (!idpf_sideband_action_ena(vport, fsp) ||
	    !idpf_sideband_flow_type_ena(vport, flow_type))
		return -EOPNOTSUPP;

	if (user_config->num_fsteer_fltrs > idpf_fsteer_max_rules(vport))
		return -ENOSPC;

	/* Action is "steer to queue"; the cookie is the target RX queue */
	q_index = fsp->ring_cookie;
	if (q_index >= num_rxq)
		return -EINVAL;

	rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	rule->vport_id = cpu_to_le32(vport->vport_id);
	rule->count = cpu_to_le32(1);
	info = &rule->rule_info[0];
	/* ethtool's filter location doubles as the device rule id */
	info->rule_id = cpu_to_le32(fsp->location);

	/* Two protocol headers: L3 in slot 0, L4 in slot 1 */
	hdrs = &info->rule_cfg.proto_hdrs;
	hdrs->tunnel_level = 0;
	hdrs->count = cpu_to_le32(2);

	acts = &info->rule_cfg.action_set;
	acts->count = cpu_to_le32(1);
	acts->actions[0].action_type = cpu_to_le32(VIRTCHNL2_ACTION_QUEUE);
	acts->actions[0].act_conf.q_id = cpu_to_le32(q_index);

	switch (flow_type) {
	case UDP_V4_FLOW:
		idpf_fsteer_fill_ipv4(hdrs, fsp);
		idpf_fsteer_fill_udp(hdrs, fsp, true);
		break;
	case TCP_V4_FLOW:
		idpf_fsteer_fill_ipv4(hdrs, fsp);
		idpf_fsteer_fill_tcp(hdrs, fsp, true);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	err = idpf_add_del_fsteer_filters(vport->adapter, rule,
					  VIRTCHNL2_OP_ADD_FLOW_RULE);
	if (err)
		goto out;

	/* Message delivery can succeed while the device rejects the rule;
	 * the per-rule status reports that case.
	 */
	if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {
		err = -EIO;
		goto out;
	}

	/* NOTE(review): if this allocation fails the rule remains
	 * programmed in HW but untracked by the driver - TODO confirm
	 * whether a rollback delete is needed here.
	 */
	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
	if (!fltr) {
		err = -ENOMEM;
		goto out;
	}

	fltr->loc = fsp->location;
	fltr->q_index = q_index;
	/* Keep the list sorted by location: walk to the last entry with a
	 * smaller loc and insert after it (or at the head if none).
	 */
	list_for_each_entry(f, &user_config->flow_steer_list, list) {
		if (f->loc >= fltr->loc)
			break;
		parent = f;
	}

	parent ? list_add(&fltr->list, &parent->list) :
		 list_add(&fltr->list, &user_config->flow_steer_list);
	user_config->num_fsteer_fltrs++;

out:
	kfree(rule);
	return err;
}
/**
 * idpf_del_flow_steer - delete a Flow Steering filter
 * @netdev: network interface device structure
 * @cmd: command to delete Flow Steering filter
 *
 * Asks the control plane to remove the rule identified by the ethtool
 * filter location, then drops the matching entry from the driver's
 * flow_steer_list.
 *
 * Context: caller holds the vport control lock.
 *
 * Return: 0 on success and negative values for failure
 */
static int idpf_del_flow_steer(struct net_device *netdev,
			       struct ethtool_rxnfc *cmd)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;
	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
	struct virtchnl2_flow_rule_add_del *rule;
	struct idpf_vport_config *vport_config;
	struct virtchnl2_flow_rule_info *info;
	struct idpf_fsteer_fltr *f, *iter;
	struct idpf_vport *vport;
	int err;

	vport = idpf_netdev_to_vport(netdev);
	vport_config = vport->adapter->vport_config[np->vport_idx];
	user_config = &vport_config->user_config;

	if (!idpf_sideband_action_ena(vport, fsp))
		return -EOPNOTSUPP;

	rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	rule->vport_id = cpu_to_le32(vport->vport_id);
	rule->count = cpu_to_le32(1);
	info = &rule->rule_info[0];
	/* Device rule id is the ethtool location used at add time */
	info->rule_id = cpu_to_le32(fsp->location);

	err = idpf_add_del_fsteer_filters(vport->adapter, rule,
					  VIRTCHNL2_OP_DEL_FLOW_RULE);
	if (err)
		goto out;

	/* Delivery success does not imply the device accepted the delete */
	if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {
		err = -EIO;
		goto out;
	}

	/* _safe iteration: the matched node is unlinked and freed */
	list_for_each_entry_safe(f, iter,
				 &user_config->flow_steer_list, list) {
		if (f->loc == fsp->location) {
			list_del(&f->list);
			kfree(f);
			user_config->num_fsteer_fltrs--;
			goto out;
		}
	}

	/* HW deleted a rule the driver was not tracking */
	err = -EINVAL;
out:
	kfree(rule);
	return err;
}
}
/**
 * idpf_set_rxnfc - command to set RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command to execute
 *
 * Dispatches flow-steering rule insert/delete requests under the vport
 * control lock; all other rxnfc set commands are unsupported.
 *
 * Return: 0 on success, -EOPNOTSUPP for unsupported commands, other
 * negative values on failure.
 */
static int idpf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	idpf_vport_ctrl_lock(netdev);
	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = idpf_add_flow_steer(netdev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = idpf_del_flow_steer(netdev, cmd);
		break;
	default:
		break;
	}
	idpf_vport_ctrl_unlock(netdev);

	return ret;
}
/**
@@ -1090,12 +1377,14 @@ static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
/**
* __idpf_set_q_coalesce - set ITR values for specific queue
* @ec: ethtool structure from user to update ITR settings
* @q_coal: per queue coalesce settings
* @qv: queue vector for which itr values has to be set
* @is_rxq: is queue type rx
*
* Returns 0 on success, negative otherwise.
*/
static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
struct idpf_q_coalesce *q_coal,
struct idpf_q_vector *qv, bool is_rxq)
{
u32 use_adaptive_coalesce, coalesce_usecs;
@@ -1139,20 +1428,25 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
if (is_rxq) {
qv->rx_itr_value = coalesce_usecs;
q_coal->rx_coalesce_usecs = coalesce_usecs;
if (use_adaptive_coalesce) {
qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
q_coal->rx_intr_mode = IDPF_ITR_DYNAMIC;
} else {
qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
idpf_vport_intr_write_itr(qv, qv->rx_itr_value,
false);
q_coal->rx_intr_mode = !IDPF_ITR_DYNAMIC;
idpf_vport_intr_write_itr(qv, coalesce_usecs, false);
}
} else {
qv->tx_itr_value = coalesce_usecs;
q_coal->tx_coalesce_usecs = coalesce_usecs;
if (use_adaptive_coalesce) {
qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
q_coal->tx_intr_mode = IDPF_ITR_DYNAMIC;
} else {
qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true);
q_coal->tx_intr_mode = !IDPF_ITR_DYNAMIC;
idpf_vport_intr_write_itr(qv, coalesce_usecs, true);
}
}
@@ -1165,6 +1459,7 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
/**
* idpf_set_q_coalesce - set ITR values for specific queue
* @vport: vport associated to the queue that need updating
* @q_coal: per queue coalesce settings
* @ec: coalesce settings to program the device with
* @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
* @is_rxq: is queue type rx
@@ -1172,6 +1467,7 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
* Return 0 on success, and negative on failure
*/
static int idpf_set_q_coalesce(const struct idpf_vport *vport,
struct idpf_q_coalesce *q_coal,
const struct ethtool_coalesce *ec,
int q_num, bool is_rxq)
{
@@ -1180,7 +1476,7 @@ static int idpf_set_q_coalesce(const struct idpf_vport *vport,
qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
idpf_find_txq_vec(vport, q_num);
if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq))
if (qv && __idpf_set_q_coalesce(ec, q_coal, qv, is_rxq))
return -EINVAL;
return 0;
@@ -1201,9 +1497,13 @@ static int idpf_set_coalesce(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_user_config_data *user_config;
struct idpf_q_coalesce *q_coal;
struct idpf_vport *vport;
int i, err = 0;
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
@@ -1211,13 +1511,15 @@ static int idpf_set_coalesce(struct net_device *netdev,
goto unlock_mutex;
for (i = 0; i < vport->num_txq; i++) {
err = idpf_set_q_coalesce(vport, ec, i, false);
q_coal = &user_config->q_coalesce[i];
err = idpf_set_q_coalesce(vport, q_coal, ec, i, false);
if (err)
goto unlock_mutex;
}
for (i = 0; i < vport->num_rxq; i++) {
err = idpf_set_q_coalesce(vport, ec, i, true);
q_coal = &user_config->q_coalesce[i];
err = idpf_set_q_coalesce(vport, q_coal, ec, i, true);
if (err)
goto unlock_mutex;
}
@@ -1239,20 +1541,25 @@ static int idpf_set_coalesce(struct net_device *netdev,
static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
struct ethtool_coalesce *ec)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_user_config_data *user_config;
struct idpf_q_coalesce *q_coal;
struct idpf_vport *vport;
int err;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
q_coal = &user_config->q_coalesce[q_num];
err = idpf_set_q_coalesce(vport, ec, q_num, false);
err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, false);
if (err) {
idpf_vport_ctrl_unlock(netdev);
return err;
}
err = idpf_set_q_coalesce(vport, ec, q_num, true);
err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, true);
idpf_vport_ctrl_unlock(netdev);
@@ -1394,6 +1701,7 @@ static const struct ethtool_ops idpf_ethtool_ops = {
.get_sset_count = idpf_get_sset_count,
.get_channels = idpf_get_channels,
.get_rxnfc = idpf_get_rxnfc,
.set_rxnfc = idpf_set_rxnfc,
.get_rxfh_key_size = idpf_get_rxfh_key_size,
.get_rxfh_indir_size = idpf_get_rxfh_indir_size,
.get_rxfh = idpf_get_rxfh,

View File

@@ -804,6 +804,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
dflt_features |= NETIF_F_RXHASH;
if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
VIRTCHNL2_CAP_FLOW_STEER) &&
idpf_vport_is_cap_ena(vport, VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER))
dflt_features |= NETIF_F_NTUPLE;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4))
csum_offloads |= NETIF_F_IP_CSUM;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6))
@@ -1130,8 +1134,10 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
if (!vport)
return vport;
num_max_q = max(max_q->max_txq, max_q->max_rxq);
if (!adapter->vport_config[idx]) {
struct idpf_vport_config *vport_config;
struct idpf_q_coalesce *q_coal;
vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL);
if (!vport_config) {
@@ -1140,6 +1146,21 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
return NULL;
}
q_coal = kcalloc(num_max_q, sizeof(*q_coal), GFP_KERNEL);
if (!q_coal) {
kfree(vport_config);
kfree(vport);
return NULL;
}
for (int i = 0; i < num_max_q; i++) {
q_coal[i].tx_intr_mode = IDPF_ITR_DYNAMIC;
q_coal[i].tx_coalesce_usecs = IDPF_ITR_TX_DEF;
q_coal[i].rx_intr_mode = IDPF_ITR_DYNAMIC;
q_coal[i].rx_coalesce_usecs = IDPF_ITR_RX_DEF;
}
vport_config->user_config.q_coalesce = q_coal;
adapter->vport_config[idx] = vport_config;
}
@@ -1149,7 +1170,6 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
vport->default_vport = adapter->num_alloc_vports <
idpf_get_default_vports(adapter);
num_max_q = max(max_q->max_txq, max_q->max_rxq);
vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
if (!vport->q_vector_idxs)
goto free_vport;
@@ -1532,6 +1552,7 @@ void idpf_init_task(struct work_struct *work)
spin_lock_init(&vport_config->mac_filter_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list);
err = idpf_check_supported_desc_ids(vport);
if (err) {

View File

@@ -62,6 +62,7 @@ static void idpf_remove(struct pci_dev *pdev)
destroy_workqueue(adapter->vc_event_wq);
for (i = 0; i < adapter->max_vports; i++) {
kfree(adapter->vport_config[i]->user_config.q_coalesce);
kfree(adapter->vport_config[i]);
adapter->vport_config[i] = NULL;
}

View File

@@ -42,6 +42,13 @@ void idpf_ptp_get_features_access(const struct idpf_adapter *adapter)
direct,
mailbox);
/* Get the cross timestamp */
direct = VIRTCHNL2_CAP_PTP_GET_CROSS_TIME;
mailbox = VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB;
ptp->get_cross_tstamp_access = idpf_ptp_get_access(adapter,
direct,
mailbox);
/* Set the device clock time */
direct = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME;
mailbox = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME;
@@ -171,6 +178,127 @@ static int idpf_ptp_read_src_clk_reg(struct idpf_adapter *adapter, u64 *src_clk,
return 0;
}
#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER) || IS_ENABLED(CONFIG_X86)
/**
 * idpf_ptp_get_sync_device_time_direct - Get the cross time stamp values
 *					  directly
 * @adapter: Driver specific private structure
 * @dev_time: 64bit main timer value
 * @sys_time: 64bit system time value
 */
static void idpf_ptp_get_sync_device_time_direct(struct idpf_adapter *adapter,
						 u64 *dev_time, u64 *sys_time)
{
	u32 dev_time_lo, dev_time_hi, sys_time_lo, sys_time_hi;
	struct idpf_ptp *ptp = adapter->ptp;

	/* Serialize against other device-clock readers: the shadow-time
	 * trigger and the four register reads below must not interleave.
	 */
	spin_lock(&ptp->read_dev_clk_lock);

	/* Trigger the shadow-time capture; presumably this latches the
	 * device and system time registers together so the two values
	 * correspond to the same instant -- TODO confirm against HW spec.
	 */
	idpf_ptp_enable_shtime(adapter);

	dev_time_lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
	dev_time_hi = readl(ptp->dev_clk_regs.dev_clk_ns_h);
	sys_time_lo = readl(ptp->dev_clk_regs.sys_time_ns_l);
	sys_time_hi = readl(ptp->dev_clk_regs.sys_time_ns_h);

	spin_unlock(&ptp->read_dev_clk_lock);

	/* Combine the 32-bit halves into 64-bit values */
	*dev_time = (u64)dev_time_hi << 32 | dev_time_lo;
	*sys_time = (u64)sys_time_hi << 32 | sys_time_lo;
}
/**
 * idpf_ptp_get_sync_device_time_mailbox - Get the cross time stamp values
 *					   through mailbox
 * @adapter: Driver specific private structure
 * @dev_time: 64bit main timer value expressed in nanoseconds
 * @sys_time: 64bit system time value expressed in nanoseconds
 *
 * Return: 0 on success, -errno otherwise.
 */
static int idpf_ptp_get_sync_device_time_mailbox(struct idpf_adapter *adapter,
						 u64 *dev_time, u64 *sys_time)
{
	struct idpf_ptp_dev_timers cross_time;
	int err;

	/* Ask the Control Plane for a device/system time pair over virtchnl */
	err = idpf_ptp_get_cross_time(adapter, &cross_time);
	if (err)
		return err;

	*dev_time = cross_time.dev_clk_time_ns;
	*sys_time = cross_time.sys_time_ns;

	/* err is necessarily 0 at this point */
	return err;
}
/**
 * idpf_ptp_get_sync_device_time - Get the cross time stamp info
 * @device: Current device time
 * @system: System counter value read synchronously with device time
 * @ctx: Context provided by timekeeping code
 *
 * The device and the system clocks time read simultaneously.
 *
 * Callback for get_device_system_crosststamp(); @ctx carries the adapter.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int idpf_ptp_get_sync_device_time(ktime_t *device,
					 struct system_counterval_t *system,
					 void *ctx)
{
	struct idpf_adapter *adapter = ctx;
	u64 ns_time_dev, ns_time_sys;
	int err;

	/* Choose the read path negotiated with the Control Plane */
	switch (adapter->ptp->get_cross_tstamp_access) {
	case IDPF_PTP_NONE:
		return -EOPNOTSUPP;
	case IDPF_PTP_DIRECT:
		idpf_ptp_get_sync_device_time_direct(adapter, &ns_time_dev,
						     &ns_time_sys);
		break;
	case IDPF_PTP_MAILBOX:
		err = idpf_ptp_get_sync_device_time_mailbox(adapter,
							    &ns_time_dev,
							    &ns_time_sys);
		if (err)
			return err;
		break;
	default:
		return -EOPNOTSUPP;
	}

	*device = ns_to_ktime(ns_time_dev);

	/* System counterpart is ART on x86, the architected counter on ARM;
	 * the value is already expressed in nanoseconds (use_nsecs).
	 */
	system->cs_id = IS_ENABLED(CONFIG_X86) ? CSID_X86_ART
					       : CSID_ARM_ARCH_COUNTER;
	system->cycles = ns_time_sys;
	system->use_nsecs = true;

	return 0;
}
/**
 * idpf_ptp_get_crosststamp - Capture a device cross timestamp
 * @info: the driver's PTP info structure
 * @cts: The memory to fill the cross timestamp info
 *
 * Capture a cross timestamp between the system time and the device PTP
 * hardware clock by delegating to the core timekeeping helper with
 * idpf_ptp_get_sync_device_time() as the device-read callback.
 *
 * Return: cross timestamp value on success, -errno on failure.
 */
static int idpf_ptp_get_crosststamp(struct ptp_clock_info *info,
				    struct system_device_crosststamp *cts)
{
	return get_device_system_crosststamp(idpf_ptp_get_sync_device_time,
					     idpf_ptp_info_to_adapter(info),
					     NULL, cts);
}
#endif /* CONFIG_ARM_ARCH_TIMER || CONFIG_X86 */
/**
* idpf_ptp_gettimex64 - Get the time of the clock
* @info: the driver's PTP info structure
@@ -661,6 +789,14 @@ static void idpf_ptp_set_caps(const struct idpf_adapter *adapter)
info->verify = idpf_ptp_verify_pin;
info->enable = idpf_ptp_gpio_enable;
info->do_aux_work = idpf_ptp_do_aux_work;
#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER)
info->getcrosststamp = idpf_ptp_get_crosststamp;
#elif IS_ENABLED(CONFIG_X86)
if (pcie_ptm_enabled(adapter->pdev) &&
boot_cpu_has(X86_FEATURE_ART) &&
boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
info->getcrosststamp = idpf_ptp_get_crosststamp;
#endif /* CONFIG_ARM_ARCH_TIMER */
}
/**

View File

@@ -21,6 +21,8 @@ struct idpf_ptp_cmd {
* @dev_clk_ns_h: high part of the device clock register
* @phy_clk_ns_l: low part of the PHY clock register
* @phy_clk_ns_h: high part of the PHY clock register
* @sys_time_ns_l: low part of the system time register
* @sys_time_ns_h: high part of the system time register
* @incval_l: low part of the increment value register
* @incval_h: high part of the increment value register
* @shadj_l: low part of the shadow adjust register
@@ -42,6 +44,10 @@ struct idpf_ptp_dev_clk_regs {
void __iomem *phy_clk_ns_l;
void __iomem *phy_clk_ns_h;
/* System time */
void __iomem *sys_time_ns_l;
void __iomem *sys_time_ns_h;
/* Main timer adjustments */
void __iomem *incval_l;
void __iomem *incval_h;
@@ -162,6 +168,7 @@ struct idpf_ptp_vport_tx_tstamp_caps {
* @dev_clk_regs: the set of registers to access the device clock
* @caps: PTP capabilities negotiated with the Control Plane
* @get_dev_clk_time_access: access type for getting the device clock time
* @get_cross_tstamp_access: access type for the cross timestamping
* @set_dev_clk_time_access: access type for setting the device clock time
* @adj_dev_clk_time_access: access type for the adjusting the device clock
* @tx_tstamp_access: access type for the Tx timestamp value read
@@ -182,6 +189,7 @@ struct idpf_ptp {
struct idpf_ptp_dev_clk_regs dev_clk_regs;
u32 caps;
enum idpf_ptp_access get_dev_clk_time_access:2;
enum idpf_ptp_access get_cross_tstamp_access:2;
enum idpf_ptp_access set_dev_clk_time_access:2;
enum idpf_ptp_access adj_dev_clk_time_access:2;
enum idpf_ptp_access tx_tstamp_access:2;
@@ -264,6 +272,8 @@ void idpf_ptp_get_features_access(const struct idpf_adapter *adapter);
bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq);
int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
struct idpf_ptp_dev_timers *dev_clk_time);
int idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
struct idpf_ptp_dev_timers *cross_time);
int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time);
int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval);
int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta);
@@ -305,6 +315,13 @@ idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
return -EOPNOTSUPP;
}
static inline int
idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
struct idpf_ptp_dev_timers *cross_time)
{
return -EOPNOTSUPP;
}
static inline int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter,
u64 time)
{

View File

@@ -4355,9 +4355,13 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
int idpf_vport_intr_alloc(struct idpf_vport *vport)
{
u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
struct idpf_vport_user_config_data *user_config;
struct idpf_q_vector *q_vector;
struct idpf_q_coalesce *q_coal;
u32 complqs_per_vector, v_idx;
u16 idx = vport->idx;
user_config = &vport->adapter->vport_config[idx]->user_config;
vport->q_vectors = kcalloc(vport->num_q_vectors,
sizeof(struct idpf_q_vector), GFP_KERNEL);
if (!vport->q_vectors)
@@ -4375,14 +4379,15 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
q_vector = &vport->q_vectors[v_idx];
q_coal = &user_config->q_coalesce[v_idx];
q_vector->vport = vport;
q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
q_vector->tx_itr_value = q_coal->tx_coalesce_usecs;
q_vector->tx_intr_mode = q_coal->tx_intr_mode;
q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
q_vector->rx_itr_value = q_coal->rx_coalesce_usecs;
q_vector->rx_intr_mode = q_coal->rx_intr_mode;
q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),

View File

@@ -850,14 +850,14 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);
caps.rss_caps =
cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP |
VIRTCHNL2_CAP_RSS_IPV4_UDP |
VIRTCHNL2_CAP_RSS_IPV4_SCTP |
VIRTCHNL2_CAP_RSS_IPV4_OTHER |
VIRTCHNL2_CAP_RSS_IPV6_TCP |
VIRTCHNL2_CAP_RSS_IPV6_UDP |
VIRTCHNL2_CAP_RSS_IPV6_SCTP |
VIRTCHNL2_CAP_RSS_IPV6_OTHER);
cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP |
VIRTCHNL2_FLOW_IPV4_UDP |
VIRTCHNL2_FLOW_IPV4_SCTP |
VIRTCHNL2_FLOW_IPV4_OTHER |
VIRTCHNL2_FLOW_IPV6_TCP |
VIRTCHNL2_FLOW_IPV6_UDP |
VIRTCHNL2_FLOW_IPV6_SCTP |
VIRTCHNL2_FLOW_IPV6_OTHER);
caps.hsplit_caps =
cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |
@@ -1015,6 +1015,41 @@ static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter)
return 0;
}
/**
 * idpf_add_del_fsteer_filters - Send virtchnl add/del Flow Steering message
 * @adapter: adapter info struct
 * @rule: Flow steering rule to add/delete
 * @opcode: VIRTCHNL2_OP_ADD_FLOW_RULE to add filter, or
 *	    VIRTCHNL2_OP_DEL_FLOW_RULE to delete. All other values are invalid.
 *
 * Send ADD/DELETE flow steering virtchnl message and receive the result.
 *
 * Return: 0 on success, negative on failure.
 */
int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
				struct virtchnl2_flow_rule_add_del *rule,
				enum virtchnl2_op opcode)
{
	int rule_count = le32_to_cpu(rule->count);
	struct idpf_vc_xn_params xn_params = {};
	ssize_t reply_sz;

	/* Only the two flow-rule opcodes are valid for this transaction */
	if (opcode != VIRTCHNL2_OP_ADD_FLOW_RULE &&
	    opcode != VIRTCHNL2_OP_DEL_FLOW_RULE)
		return -EINVAL;

	xn_params.vc_op = opcode;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.async = false;
	/* The same buffer is used for request and reply: the Control Plane
	 * echoes the rules back with the per-rule status fields filled in
	 * (see struct virtchnl2_flow_rule_add_del).
	 */
	xn_params.send_buf.iov_base = rule;
	xn_params.send_buf.iov_len = struct_size(rule, rule_info, rule_count);
	xn_params.recv_buf.iov_base = rule;
	xn_params.recv_buf.iov_len = struct_size(rule, rule_info, rule_count);

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	/* Callers only need success/failure; discard the reply length */
	return reply_sz < 0 ? reply_sz : 0;
}
/**
* idpf_vport_alloc_max_qs - Allocate max queues for a vport
* @adapter: Driver specific private structure
@@ -3642,6 +3677,79 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
return !!(*cap_field & flag);
}
/**
 * idpf_vport_is_cap_ena - Check if vport capability is enabled
 * @vport: Private data struct
 * @flag: flag(s) to check
 *
 * Tests @flag against the vport_flags the Control Plane reported in the
 * CREATE_VPORT response for this vport.
 *
 * Return: true if the capability is supported, false otherwise
 */
bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag)
{
	const struct virtchnl2_create_vport *vmsg =
		vport->adapter->vport_params_recvd[vport->idx];
	u16 vport_flags = le16_to_cpu(vmsg->vport_flags);

	return (vport_flags & flag) != 0;
}
/**
 * idpf_sideband_flow_type_ena - Check if steering is enabled for flow type
 * @vport: Private data struct
 * @flow_type: flow type to check (from ethtool.h)
 *
 * Looks up @flow_type in the sideband flow capability mask reported by the
 * Control Plane at vport creation. Only IPv4 TCP/UDP are recognized.
 *
 * Return: true if sideband filters are allowed for @flow_type, false otherwise
 */
bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type)
{
	const struct virtchnl2_create_vport *vmsg =
		vport->adapter->vport_params_recvd[vport->idx];
	u64 caps = le64_to_cpu(vmsg->sideband_flow_caps);

	if (flow_type == TCP_V4_FLOW)
		return !!(caps & VIRTCHNL2_FLOW_IPV4_TCP);
	if (flow_type == UDP_V4_FLOW)
		return !!(caps & VIRTCHNL2_FLOW_IPV4_UDP);

	return false;
}
/**
* idpf_sideband_action_ena - Check if steering is enabled for action
* @vport: Private data struct
* @fsp: flow spec
*
* Return: true if sideband filters are allowed for @fsp, false otherwise
*/
bool idpf_sideband_action_ena(struct idpf_vport *vport,
struct ethtool_rx_flow_spec *fsp)
{
struct virtchnl2_create_vport *vport_msg;
unsigned int supp_actions;
vport_msg = vport->adapter->vport_params_recvd[vport->idx];
supp_actions = le32_to_cpu(vport_msg->sideband_flow_actions);
/* Actions Drop/Wake are not supported */
if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
fsp->ring_cookie == RX_CLS_FLOW_WAKE)
return false;
return !!(supp_actions & VIRTCHNL2_ACTION_QUEUE);
}
unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport)
{
struct virtchnl2_create_vport *vport_msg;
vport_msg = vport->adapter->vport_params_recvd[vport->idx];
return le32_to_cpu(vport_msg->flow_steer_max_rules);
}
/**
* idpf_get_vport_id: Get vport id
* @vport: virtual port structure

View File

@@ -105,6 +105,12 @@ int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
int idpf_queue_reg_init(struct idpf_vport *vport);
int idpf_vport_queue_ids_init(struct idpf_vport *vport);
bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag);
bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type);
bool idpf_sideband_action_ena(struct idpf_vport *vport,
struct ethtool_rx_flow_spec *fsp);
unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport);
int idpf_recv_mb_msg(struct idpf_adapter *adapter);
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
u16 msg_size, u8 *msg, u16 cookie);

View File

@@ -30,6 +30,7 @@ int idpf_ptp_get_caps(struct idpf_adapter *adapter)
.send_buf.iov_len = sizeof(send_ptp_caps_msg),
.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
};
struct virtchnl2_ptp_cross_time_reg_offsets cross_tstamp_offsets;
struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
struct virtchnl2_ptp_clk_reg_offsets clock_offsets;
struct idpf_ptp_secondary_mbx *scnd_mbx;
@@ -71,7 +72,7 @@ int idpf_ptp_get_caps(struct idpf_adapter *adapter)
access_type = ptp->get_dev_clk_time_access;
if (access_type != IDPF_PTP_DIRECT)
goto discipline_clock;
goto cross_tstamp;
clock_offsets = recv_ptp_caps_msg->clk_offsets;
@@ -90,6 +91,22 @@ int idpf_ptp_get_caps(struct idpf_adapter *adapter)
temp_offset = le32_to_cpu(clock_offsets.cmd_sync_trigger);
ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);
cross_tstamp:
access_type = ptp->get_cross_tstamp_access;
if (access_type != IDPF_PTP_DIRECT)
goto discipline_clock;
cross_tstamp_offsets = recv_ptp_caps_msg->cross_time_offsets;
temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_l);
ptp->dev_clk_regs.sys_time_ns_l = idpf_get_reg_addr(adapter,
temp_offset);
temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_h);
ptp->dev_clk_regs.sys_time_ns_h = idpf_get_reg_addr(adapter,
temp_offset);
temp_offset = le32_to_cpu(cross_tstamp_offsets.cmd_sync_trigger);
ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);
discipline_clock:
access_type = ptp->adj_dev_clk_time_access;
if (access_type != IDPF_PTP_DIRECT)
@@ -162,6 +179,42 @@ int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
return 0;
}
/**
 * idpf_ptp_get_cross_time - Send virtchnl get cross time message
 * @adapter: Driver specific private structure
 * @cross_time: Pointer to the device clock structure where the value is set
 *
 * Send virtchnl get cross time message to get the time of the clock and the
 * system time.
 *
 * Return: 0 on success, -errno otherwise.
 */
int idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
			    struct idpf_ptp_dev_timers *cross_time)
{
	/* NOTE(review): cross_time_msg is sent uninitialized; presumably the
	 * CP ignores the request payload for this opcode -- confirm against
	 * the virtchnl2 spec.
	 */
	struct virtchnl2_ptp_get_cross_time cross_time_msg;
	/* Same buffer for request and reply; reply carries both timestamps */
	struct idpf_vc_xn_params xn_params = {
		.vc_op = VIRTCHNL2_OP_PTP_GET_CROSS_TIME,
		.send_buf.iov_base = &cross_time_msg,
		.send_buf.iov_len = sizeof(cross_time_msg),
		.recv_buf.iov_base = &cross_time_msg,
		.recv_buf.iov_len = sizeof(cross_time_msg),
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	int reply_sz;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;

	/* A short reply means a malformed response from the CP */
	if (reply_sz != sizeof(cross_time_msg))
		return -EIO;

	cross_time->dev_clk_time_ns = le64_to_cpu(cross_time_msg.dev_time_ns);
	cross_time->sys_time_ns = le64_to_cpu(cross_time_msg.sys_time_ns);

	return 0;
}
/**
* idpf_ptp_set_dev_clk_time - Send virtchnl set device time message
* @adapter: Driver specific private structure

View File

@@ -80,6 +80,10 @@ enum virtchnl2_op {
VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME = 547,
VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS = 548,
VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS = 549,
/* Opcode 550 is reserved */
VIRTCHNL2_OP_ADD_FLOW_RULE = 551,
VIRTCHNL2_OP_GET_FLOW_RULE = 552,
VIRTCHNL2_OP_DEL_FLOW_RULE = 553,
};
/**
@@ -153,22 +157,22 @@ enum virtchnl2_cap_seg {
VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL = BIT(8),
};
/* Receive Side Scaling Flow type capability flags */
enum virtchnl2_cap_rss {
VIRTCHNL2_CAP_RSS_IPV4_TCP = BIT(0),
VIRTCHNL2_CAP_RSS_IPV4_UDP = BIT(1),
VIRTCHNL2_CAP_RSS_IPV4_SCTP = BIT(2),
VIRTCHNL2_CAP_RSS_IPV4_OTHER = BIT(3),
VIRTCHNL2_CAP_RSS_IPV6_TCP = BIT(4),
VIRTCHNL2_CAP_RSS_IPV6_UDP = BIT(5),
VIRTCHNL2_CAP_RSS_IPV6_SCTP = BIT(6),
VIRTCHNL2_CAP_RSS_IPV6_OTHER = BIT(7),
VIRTCHNL2_CAP_RSS_IPV4_AH = BIT(8),
VIRTCHNL2_CAP_RSS_IPV4_ESP = BIT(9),
VIRTCHNL2_CAP_RSS_IPV4_AH_ESP = BIT(10),
VIRTCHNL2_CAP_RSS_IPV6_AH = BIT(11),
VIRTCHNL2_CAP_RSS_IPV6_ESP = BIT(12),
VIRTCHNL2_CAP_RSS_IPV6_AH_ESP = BIT(13),
/* Receive Side Scaling and Flow Steering Flow type capability flags */
enum virtchnl2_flow_types {
VIRTCHNL2_FLOW_IPV4_TCP = BIT(0),
VIRTCHNL2_FLOW_IPV4_UDP = BIT(1),
VIRTCHNL2_FLOW_IPV4_SCTP = BIT(2),
VIRTCHNL2_FLOW_IPV4_OTHER = BIT(3),
VIRTCHNL2_FLOW_IPV6_TCP = BIT(4),
VIRTCHNL2_FLOW_IPV6_UDP = BIT(5),
VIRTCHNL2_FLOW_IPV6_SCTP = BIT(6),
VIRTCHNL2_FLOW_IPV6_OTHER = BIT(7),
VIRTCHNL2_FLOW_IPV4_AH = BIT(8),
VIRTCHNL2_FLOW_IPV4_ESP = BIT(9),
VIRTCHNL2_FLOW_IPV4_AH_ESP = BIT(10),
VIRTCHNL2_FLOW_IPV6_AH = BIT(11),
VIRTCHNL2_FLOW_IPV6_ESP = BIT(12),
VIRTCHNL2_FLOW_IPV6_AH_ESP = BIT(13),
};
/* Header split capability flags */
@@ -194,8 +198,9 @@ enum virtchnl2_cap_other {
VIRTCHNL2_CAP_RDMA = BIT_ULL(0),
VIRTCHNL2_CAP_SRIOV = BIT_ULL(1),
VIRTCHNL2_CAP_MACFILTER = BIT_ULL(2),
VIRTCHNL2_CAP_FLOW_DIRECTOR = BIT_ULL(3),
/* Queue based scheduling using split queue model */
/* Other capability 3 is available
* Queue based scheduling using split queue model
*/
VIRTCHNL2_CAP_SPLITQ_QSCHED = BIT_ULL(4),
VIRTCHNL2_CAP_CRC = BIT_ULL(5),
VIRTCHNL2_CAP_ADQ = BIT_ULL(6),
@@ -209,17 +214,37 @@ enum virtchnl2_cap_other {
/* EDT: Earliest Departure Time capability used for Timing Wheel */
VIRTCHNL2_CAP_EDT = BIT_ULL(14),
VIRTCHNL2_CAP_ADV_RSS = BIT_ULL(15),
VIRTCHNL2_CAP_FDIR = BIT_ULL(16),
/* Other capability 16 is available */
VIRTCHNL2_CAP_RX_FLEX_DESC = BIT_ULL(17),
VIRTCHNL2_CAP_PTYPE = BIT_ULL(18),
VIRTCHNL2_CAP_LOOPBACK = BIT_ULL(19),
/* Other capability 20-21 is reserved */
/* Other capability 20 is reserved */
VIRTCHNL2_CAP_FLOW_STEER = BIT_ULL(21),
VIRTCHNL2_CAP_LAN_MEMORY_REGIONS = BIT_ULL(22),
/* this must be the last capability */
VIRTCHNL2_CAP_OEM = BIT_ULL(63),
};
/**
 * enum virtchnl2_action_types - Available actions for sideband flow steering
 * @VIRTCHNL2_ACTION_DROP: Drop the packet
 * @VIRTCHNL2_ACTION_PASSTHRU: Forward the packet to the next classifier/stage
 * @VIRTCHNL2_ACTION_QUEUE: Forward the packet to a receive queue
 * @VIRTCHNL2_ACTION_Q_GROUP: Forward the packet to a receive queue group
 * @VIRTCHNL2_ACTION_MARK: Mark the packet with specific marker value
 * @VIRTCHNL2_ACTION_COUNT: Increment the corresponding counter
 *
 * Values are bit flags; a rule's action set may carry several actions
 * (see struct virtchnl2_rule_action_set).
 */
enum virtchnl2_action_types {
	VIRTCHNL2_ACTION_DROP		= BIT(0),
	VIRTCHNL2_ACTION_PASSTHRU	= BIT(1),
	VIRTCHNL2_ACTION_QUEUE		= BIT(2),
	VIRTCHNL2_ACTION_Q_GROUP	= BIT(3),
	VIRTCHNL2_ACTION_MARK		= BIT(4),
	VIRTCHNL2_ACTION_COUNT		= BIT(5),
};
/* underlying device type */
enum virtchl2_device_type {
VIRTCHNL2_MEV_DEVICE = 0,
@@ -461,7 +486,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
* @seg_caps: See enum virtchnl2_cap_seg.
* @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at.
* @rsc_caps: See enum virtchnl2_cap_rsc.
* @rss_caps: See enum virtchnl2_cap_rss.
* @rss_caps: See enum virtchnl2_flow_types.
* @other_caps: See enum virtchnl2_cap_other.
* @mailbox_dyn_ctl: DYN_CTL register offset and vector id for mailbox
* provided by CP.
@@ -578,11 +603,17 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
/**
* enum virtchnl2_vport_flags - Vport flags that indicate vport capabilities.
* @VIRTCHNL2_VPORT_UPLINK_PORT: Representatives of underlying physical ports
* @VIRTCHNL2_VPORT_INLINE_FLOW_STEER: Inline flow steering enabled
* @VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ: Inline flow steering enabled
* with explicit Rx queue action
* @VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER: Sideband flow steering enabled
* @VIRTCHNL2_VPORT_ENABLE_RDMA: RDMA is enabled for this vport
*/
enum virtchnl2_vport_flags {
VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0),
/* VIRTCHNL2_VPORT_* bits [1:3] rsvd */
VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0),
VIRTCHNL2_VPORT_INLINE_FLOW_STEER = BIT(1),
VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ = BIT(2),
VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER = BIT(3),
VIRTCHNL2_VPORT_ENABLE_RDMA = BIT(4),
};
@@ -608,6 +639,14 @@ enum virtchnl2_vport_flags {
* @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
* @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions.
* @pad1: Padding.
* @inline_flow_caps: Bit mask of supported inline-flow-steering
* flow types (See enum virtchnl2_flow_types)
* @sideband_flow_caps: Bit mask of supported sideband-flow-steering
* flow types (See enum virtchnl2_flow_types)
* @sideband_flow_actions: Bit mask of supported action types
* for sideband flow steering (See enum virtchnl2_action_types)
* @flow_steer_max_rules: Max rules allowed for inline and sideband
* flow steering combined
* @rss_algorithm: RSS algorithm.
* @rss_key_size: RSS key size.
* @rss_lut_size: RSS LUT size.
@@ -640,7 +679,11 @@ struct virtchnl2_create_vport {
__le16 vport_flags;
__le64 rx_desc_ids;
__le64 tx_desc_ids;
u8 pad1[72];
u8 pad1[48];
__le64 inline_flow_caps;
__le64 sideband_flow_caps;
__le32 sideband_flow_actions;
__le32 flow_steer_max_rules;
__le32 rss_algorithm;
__le16 rss_key_size;
__le16 rss_lut_size;
@@ -1615,4 +1658,156 @@ struct virtchnl2_get_lan_memory_regions {
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_lan_memory_regions);
#define VIRTCHNL2_MAX_NUM_PROTO_HDRS 4
#define VIRTCHNL2_MAX_SIZE_RAW_PACKET 256
#define VIRTCHNL2_MAX_NUM_ACTIONS 8
/**
 * struct virtchnl2_proto_hdr - represent one protocol header
 * @hdr_type: See enum virtchnl2_proto_hdr_type
 * @pad: padding
 * @buffer_spec: binary buffer based on header type.
 * @buffer_mask: mask applied on buffer_spec.
 *
 * Structure to hold protocol headers based on hdr_type
 */
struct virtchnl2_proto_hdr {
	__le32 hdr_type;
	u8 pad[4];
	u8 buffer_spec[64];
	u8 buffer_mask[64];
};
/* Wire format: 4 + 4 + 64 + 64 = 136 bytes, no implicit padding */
VIRTCHNL2_CHECK_STRUCT_LEN(136, virtchnl2_proto_hdr);
/**
 * struct virtchnl2_proto_hdrs - struct to represent match criteria
 * @tunnel_level: specify where protocol header(s) start from.
 *		  must be 0 when sending a raw packet request.
 *		  0 - from the outer layer
 *		  1 - from the first inner layer
 *		  2 - from the second inner layer
 * @pad: Padding bytes
 * @count: total number of protocol headers in proto_hdr. 0 for raw packet.
 * @proto_hdr: Array of protocol headers
 * @raw: struct holding raw packet buffer when count is 0
 */
struct virtchnl2_proto_hdrs {
	u8 tunnel_level;
	u8 pad[3];
	__le32 count;
	/* @count selects the active union member: >0 means parsed headers,
	 * 0 means a raw packet spec/mask pair.
	 */
	union {
		struct virtchnl2_proto_hdr
			proto_hdr[VIRTCHNL2_MAX_NUM_PROTO_HDRS];
		struct {
			__le16 pkt_len;
			u8 spec[VIRTCHNL2_MAX_SIZE_RAW_PACKET];
			u8 mask[VIRTCHNL2_MAX_SIZE_RAW_PACKET];
		} raw;
	};
};
VIRTCHNL2_CHECK_STRUCT_LEN(552, virtchnl2_proto_hdrs);
/**
 * struct virtchnl2_rule_action - struct representing single action for a flow
 * @action_type: see enum virtchnl2_action_types
 * @act_conf: union representing action depending on action_type.
 * @act_conf.q_id: queue id to redirect the packets to.
 * @act_conf.q_grp_id: queue group id to redirect the packets to.
 * @act_conf.ctr_id: used for count action. If input value 0xFFFFFFFF control
 *		     plane assigns a new counter and returns the counter ID to
 *		     the driver. If input value is not 0xFFFFFFFF then it must
 *		     be an existing counter given to the driver for an earlier
 *		     flow. Then this flow will share the counter.
 * @act_conf.mark_id: Value used to mark the packets. Used for mark action.
 * @act_conf.reserved: Reserved for future use.
 */
struct virtchnl2_rule_action {
	__le32 action_type;
	/* 8-byte union: the reserved[8] member fixes the overall size */
	union {
		__le32 q_id;
		__le32 q_grp_id;
		__le32 ctr_id;
		__le32 mark_id;
		u8 reserved[8];
	} act_conf;
};
VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rule_action);
/**
 * struct virtchnl2_rule_action_set - struct representing multiple actions
 * @count: number of valid actions in the action set of a rule
 * @actions: array of struct virtchnl2_rule_action
 *
 * The array is fixed-size on the wire; only the first @count entries are
 * meaningful.
 */
struct virtchnl2_rule_action_set {
	/* action count must be less than VIRTCHNL2_MAX_NUM_ACTIONS */
	__le32 count;
	struct virtchnl2_rule_action actions[VIRTCHNL2_MAX_NUM_ACTIONS];
};
VIRTCHNL2_CHECK_STRUCT_LEN(100, virtchnl2_rule_action_set);
/**
 * struct virtchnl2_flow_rule - represent one flow steering rule
 * @proto_hdrs: array of protocol header buffers representing match criteria
 * @action_set: series of actions to be applied for given rule
 * @priority: rule priority.
 * @pad: padding for future extensions.
 */
struct virtchnl2_flow_rule {
	struct virtchnl2_proto_hdrs proto_hdrs;
	struct virtchnl2_rule_action_set action_set;
	__le32 priority;
	u8 pad[8];
};
VIRTCHNL2_CHECK_STRUCT_LEN(664, virtchnl2_flow_rule);
/**
 * enum virtchnl2_flow_rule_status - per-rule result reported by the CP
 * @VIRTCHNL2_FLOW_RULE_SUCCESS: rule was programmed successfully
 * @VIRTCHNL2_FLOW_RULE_NORESOURCE: no resources available for the rule
 * @VIRTCHNL2_FLOW_RULE_EXIST: an identical rule already exists
 * @VIRTCHNL2_FLOW_RULE_TIMEOUT: rule programming timed out
 * @VIRTCHNL2_FLOW_RULE_FLOW_TYPE_NOT_SUPPORTED: flow type not supported
 * @VIRTCHNL2_FLOW_RULE_MATCH_KEY_NOT_SUPPORTED: match key not supported
 * @VIRTCHNL2_FLOW_RULE_ACTION_NOT_SUPPORTED: requested action not supported
 * @VIRTCHNL2_FLOW_RULE_ACTION_COMBINATION_INVALID: action combo is invalid
 * @VIRTCHNL2_FLOW_RULE_ACTION_DATA_INVALID: action configuration data invalid
 * @VIRTCHNL2_FLOW_RULE_NOT_ADDED: rule was not added
 *
 * Values start at 1; see the status member of struct virtchnl2_flow_rule_info.
 */
enum virtchnl2_flow_rule_status {
	VIRTCHNL2_FLOW_RULE_SUCCESS			= 1,
	VIRTCHNL2_FLOW_RULE_NORESOURCE			= 2,
	VIRTCHNL2_FLOW_RULE_EXIST			= 3,
	VIRTCHNL2_FLOW_RULE_TIMEOUT			= 4,
	VIRTCHNL2_FLOW_RULE_FLOW_TYPE_NOT_SUPPORTED	= 5,
	VIRTCHNL2_FLOW_RULE_MATCH_KEY_NOT_SUPPORTED	= 6,
	VIRTCHNL2_FLOW_RULE_ACTION_NOT_SUPPORTED	= 7,
	VIRTCHNL2_FLOW_RULE_ACTION_COMBINATION_INVALID	= 8,
	VIRTCHNL2_FLOW_RULE_ACTION_DATA_INVALID		= 9,
	VIRTCHNL2_FLOW_RULE_NOT_ADDED			= 10,
};
/**
 * struct virtchnl2_flow_rule_info: structure representing single flow rule
 * @rule_id: rule_id associated with the flow_rule.
 * @rule_cfg: structure representing rule.
 * @status: status of rule programming. See enum virtchnl2_flow_rule_status.
 *
 * Filled by the driver on request; @status is written back by the CP.
 */
struct virtchnl2_flow_rule_info {
	__le32 rule_id;
	struct virtchnl2_flow_rule rule_cfg;
	__le32 status;
};
VIRTCHNL2_CHECK_STRUCT_LEN(672, virtchnl2_flow_rule_info);
/**
 * struct virtchnl2_flow_rule_add_del - add/delete a flow steering rule
 * @vport_id: vport id for which the rule is to be added or deleted.
 * @count: Indicates number of rules to be added or deleted.
 * @rule_info: Array of flow rules to be added or deleted.
 *
 * For VIRTCHNL2_OP_FLOW_RULE_ADD, rule_info contains list of rules to be
 * added. If rule_id is 0xFFFFFFFF, then the rule is programmed and not cached.
 *
 * For VIRTCHNL2_OP_FLOW_RULE_DEL, there are two possibilities. The structure
 * can contain either array of rule_ids or array of match keys to be deleted.
 * When match keys are used the corresponding rule_ids must be 0xFFFFFFFF.
 *
 * status member of each rule indicates the result. Maximum of 6 rules can be
 * added or deleted using this method. Driver has to retry in case of any
 * failure of ADD or DEL opcode. CP doesn't retry in case of failure.
 */
struct virtchnl2_flow_rule_add_del {
	__le32 vport_id;
	__le32 count;
	/* Flexible array; actual length is count * sizeof(rule_info[0]) */
	struct virtchnl2_flow_rule_info rule_info[] __counted_by_le(count);
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_flow_rule_add_del);
#endif /* _VIRTCHNL_2_H_ */

View File

@@ -154,7 +154,6 @@ struct igbvf_ring {
/* board specific private data structure */
struct igbvf_adapter {
struct timer_list watchdog_timer;
struct timer_list blink_timer;
struct work_struct reset_task;
struct work_struct watchdog_task;
@@ -162,10 +161,7 @@ struct igbvf_adapter {
const struct igbvf_info *ei;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u32 bd_number;
u32 rx_buffer_len;
u32 polling_interval;
u16 mng_vlan_id;
u16 link_speed;
u16 link_duplex;
@@ -183,9 +179,6 @@ struct igbvf_adapter {
unsigned int restart_queue;
u32 txd_cmd;
u32 tx_int_delay;
u32 tx_abs_int_delay;
unsigned int total_tx_bytes;
unsigned int total_tx_packets;
unsigned int total_rx_bytes;
@@ -193,23 +186,15 @@ struct igbvf_adapter {
/* Tx stats */
u32 tx_timeout_count;
u32 tx_fifo_head;
u32 tx_head_addr;
u32 tx_fifo_size;
u32 tx_dma_failed;
/* Rx */
struct igbvf_ring *rx_ring;
u32 rx_int_delay;
u32 rx_abs_int_delay;
/* Rx stats */
u64 hw_csum_err;
u64 hw_csum_good;
u64 rx_hdr_split;
u32 alloc_rx_buff_failed;
u32 rx_dma_failed;
unsigned int rx_ps_hdr_size;
u32 max_frame_size;
@@ -229,24 +214,14 @@ struct igbvf_adapter {
struct e1000_vf_stats stats;
u64 zero_base;
struct igbvf_ring test_tx_ring;
struct igbvf_ring test_rx_ring;
u32 test_icr;
u32 msg_enable;
struct msix_entry *msix_entries;
int int_mode;
u32 eims_enable_mask;
u32 eims_other;
u32 eeprom_wol;
u32 wol;
u32 pba;
bool fc_autoneg;
unsigned long led_status;
unsigned int flags;
unsigned long last_reset;
};

View File

@@ -1629,10 +1629,6 @@ static int igbvf_sw_init(struct igbvf_adapter *adapter)
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
adapter->tx_int_delay = 8;
adapter->tx_abs_int_delay = 32;
adapter->rx_int_delay = 0;
adapter->rx_abs_int_delay = 8;
adapter->requested_itr = 3;
adapter->current_itr = IGBVF_START_ITR;
@@ -2708,7 +2704,6 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct igbvf_adapter *adapter;
struct e1000_hw *hw;
const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
static int cards_found;
int err;
err = pci_enable_device_mem(pdev);
@@ -2780,8 +2775,6 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->watchdog_timeo = 5 * HZ;
strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
adapter->bd_number = cards_found++;
netdev->hw_features = NETIF_F_SG |
NETIF_F_TSO |
NETIF_F_TSO6 |

View File

@@ -406,10 +406,6 @@ extern char igc_driver_name[];
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002
#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
/* RX-desc Write-Back format RSS Type's */
enum igc_rss_type_num {
IGC_RSS_TYPE_NO_HASH = 0,
@@ -635,6 +631,7 @@ enum igc_filter_match_flags {
IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3),
IGC_FILTER_FLAG_USER_DATA = BIT(4),
IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5),
IGC_FILTER_FLAG_DEFAULT_QUEUE = BIT(6),
};
struct igc_nfc_filter {
@@ -662,10 +659,14 @@ struct igc_nfc_rule {
bool flex;
};
/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority
* based, 8 ethertype based and 32 Flex filter based rules.
/* IGC supports a total of 65 NFC rules, listed below in order of priority:
* - 16 MAC address based filtering rules (highest priority)
* - 8 ethertype based filtering rules
* - 32 Flex filter based filtering rules
* - 8 VLAN priority based filtering rules
* - 1 default queue rule (lowest priority)
*/
#define IGC_MAX_RXNFC_RULES 64
#define IGC_MAX_RXNFC_RULES 65
struct igc_flex_filter {
u8 index;

View File

@@ -383,11 +383,15 @@
#define IGC_RXDEXT_STATERR_IPE 0x40000000
#define IGC_RXDEXT_STATERR_RXE 0x80000000
#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002
#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000
#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000
#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
#define IGC_MRQC_DEFAULT_QUEUE_MASK GENMASK(5, 3)
/* Header split receive */
#define IGC_RFCTL_IPV6_EX_DIS 0x00010000

View File

@@ -1283,6 +1283,24 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
rule->flex = true;
else
rule->flex = false;
/* The wildcard rule is only applied if:
* a) None of the other filtering rules match (match_flags is zero)
* b) The flow type is ETHER_FLOW only (no additional fields set)
* c) Mask for Source MAC address is not specified (all zeros)
* d) Mask for Destination MAC address is not specified (all zeros)
* e) Mask for L2 EtherType is not specified (zero)
*
* If all these conditions are met, the rule is treated as a wildcard
* rule. Default queue feature will be used, so that all packets that do
* not match any other rule will be routed to the default queue.
*/
if (!rule->filter.match_flags &&
fsp->flow_type == ETHER_FLOW &&
is_zero_ether_addr(fsp->m_u.ether_spec.h_source) &&
is_zero_ether_addr(fsp->m_u.ether_spec.h_dest) &&
!fsp->m_u.ether_spec.h_proto)
rule->filter.match_flags = IGC_FILTER_FLAG_DEFAULT_QUEUE;
}
/**

View File

@@ -3874,6 +3874,22 @@ static void igc_del_flex_filter(struct igc_adapter *adapter,
wr32(IGC_WUFC, wufc);
}
/* Program the MRQC "Default Queue" field so packets matching no other
 * filtering rule are routed to @queue.
 */
static void igc_set_default_queue_filter(struct igc_adapter *adapter, u32 queue)
{
	struct igc_hw *hw = &adapter->hw;
	u32 mrqc = rd32(IGC_MRQC) & ~IGC_MRQC_DEFAULT_QUEUE_MASK;

	wr32(IGC_MRQC, mrqc | FIELD_PREP(IGC_MRQC_DEFAULT_QUEUE_MASK, queue));
}
/* Restore the hardware default: route unmatched packets to queue 0 */
static void igc_reset_default_queue_filter(struct igc_adapter *adapter)
{
	/* Reset the default queue to its default value which is Queue 0 */
	igc_set_default_queue_filter(adapter, 0);
}
static int igc_enable_nfc_rule(struct igc_adapter *adapter,
struct igc_nfc_rule *rule)
{
@@ -3912,6 +3928,9 @@ static int igc_enable_nfc_rule(struct igc_adapter *adapter,
return err;
}
if (rule->filter.match_flags & IGC_FILTER_FLAG_DEFAULT_QUEUE)
igc_set_default_queue_filter(adapter, rule->action);
return 0;
}
@@ -3939,6 +3958,9 @@ static void igc_disable_nfc_rule(struct igc_adapter *adapter,
if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
rule->filter.dst_addr);
if (rule->filter.match_flags & IGC_FILTER_FLAG_DEFAULT_QUEUE)
igc_reset_default_queue_filter(adapter);
}
/**

View File

@@ -346,7 +346,6 @@ struct ixgbevf_adapter {
int num_rx_queues;
struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
int num_msix_vectors;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
@@ -363,8 +362,6 @@ struct ixgbevf_adapter {
/* structs defined in ixgbe_vf.h */
struct ixgbe_hw hw;
u16 msg_enable;
/* Interrupt Throttle Rate */
u32 eitr_param;
struct ixgbevf_hw_stats stats;