mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-07 20:58:14 -04:00
net: wangxun: Use specific flag bit to simplify the code
Most of the code in the common library that differs by MAC type exists because NGBE supports only a few queues and pools, unlike TXGBE, which supports 128 queues and 64 pools. This difference accounts for most of the hardware configuration differences in the driver code. So add a flag bit "WX_FLAG_MULTI_64_FUNC" for them to clean up the driver code. Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com> Reviewed-by: Simon Horman <horms@kernel.org> Link: https://patch.msgid.link/C731132E124D75E5+20250521064402.22348-3-jiawenwu@trustnetic.com Signed-off-by: Paolo Abeni <pabeni@redhat.com>
This commit is contained in:
@@ -413,15 +413,10 @@ static unsigned int wx_max_channels(struct wx *wx)
|
||||
max_combined = 1;
|
||||
} else {
|
||||
/* support up to max allowed queues with RSS */
|
||||
switch (wx->mac.type) {
|
||||
case wx_mac_sp:
|
||||
case wx_mac_aml:
|
||||
if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
|
||||
max_combined = 63;
|
||||
break;
|
||||
default:
|
||||
else
|
||||
max_combined = 8;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return max_combined;
|
||||
|
||||
@@ -113,15 +113,10 @@ static void wx_intr_disable(struct wx *wx, u64 qmask)
|
||||
if (mask)
|
||||
wr32(wx, WX_PX_IMS(0), mask);
|
||||
|
||||
switch (wx->mac.type) {
|
||||
case wx_mac_sp:
|
||||
case wx_mac_aml:
|
||||
if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
|
||||
mask = (qmask >> 32);
|
||||
if (mask)
|
||||
wr32(wx, WX_PX_IMS(1), mask);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -133,15 +128,10 @@ void wx_intr_enable(struct wx *wx, u64 qmask)
|
||||
if (mask)
|
||||
wr32(wx, WX_PX_IMC(0), mask);
|
||||
|
||||
switch (wx->mac.type) {
|
||||
case wx_mac_sp:
|
||||
case wx_mac_aml:
|
||||
if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
|
||||
mask = (qmask >> 32);
|
||||
if (mask)
|
||||
wr32(wx, WX_PX_IMC(1), mask);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(wx_intr_enable);
|
||||
@@ -774,14 +764,8 @@ static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
|
||||
/* setup VMDq pool mapping */
|
||||
wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
|
||||
|
||||
switch (wx->mac.type) {
|
||||
case wx_mac_sp:
|
||||
case wx_mac_aml:
|
||||
if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
|
||||
wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/* HW expects these in little endian so we reverse the byte
|
||||
* order from network order (big endian) to little endian
|
||||
@@ -919,14 +903,9 @@ void wx_init_rx_addrs(struct wx *wx)
|
||||
|
||||
wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);
|
||||
|
||||
switch (wx->mac.type) {
|
||||
case wx_mac_sp:
|
||||
case wx_mac_aml:
|
||||
if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
|
||||
/* clear VMDq pool/queue selection for RAR 0 */
|
||||
wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1512,7 +1491,7 @@ static void wx_configure_virtualization(struct wx *wx)
|
||||
wr32m(wx, WX_PSR_VM_L2CTL(pool),
|
||||
WX_PSR_VM_L2CTL_AUPE, WX_PSR_VM_L2CTL_AUPE);
|
||||
|
||||
if (wx->mac.type == wx_mac_em) {
|
||||
if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
|
||||
vf_shift = BIT(VMDQ_P(0));
|
||||
/* Enable only the PF pools for Tx/Rx */
|
||||
wr32(wx, WX_RDM_VF_RE(0), vf_shift);
|
||||
@@ -1543,7 +1522,7 @@ static void wx_configure_port(struct wx *wx)
|
||||
{
|
||||
u32 value, i;
|
||||
|
||||
if (wx->mac.type == wx_mac_em) {
|
||||
if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
|
||||
value = (wx->num_vfs == 0) ?
|
||||
WX_CFG_PORT_CTL_NUM_VT_NONE :
|
||||
WX_CFG_PORT_CTL_NUM_VT_8;
|
||||
@@ -2074,7 +2053,7 @@ static void wx_setup_psrtype(struct wx *wx)
|
||||
WX_RDB_PL_CFG_TUN_OUTL2HDR |
|
||||
WX_RDB_PL_CFG_TUN_TUNHDR;
|
||||
|
||||
if (wx->mac.type == wx_mac_em) {
|
||||
if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
|
||||
for_each_set_bit(pool, &wx->fwd_bitmask, 8)
|
||||
wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
|
||||
} else {
|
||||
|
||||
@@ -1633,7 +1633,7 @@ static bool wx_set_vmdq_queues(struct wx *wx)
|
||||
/* Add starting offset to total pool count */
|
||||
vmdq_i += wx->ring_feature[RING_F_VMDQ].offset;
|
||||
|
||||
if (wx->mac.type == wx_mac_sp) {
|
||||
if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
|
||||
/* double check we are limited to maximum pools */
|
||||
vmdq_i = min_t(u16, 64, vmdq_i);
|
||||
|
||||
@@ -1693,7 +1693,7 @@ static void wx_set_rss_queues(struct wx *wx)
|
||||
|
||||
/* set mask for 16 queue limit of RSS */
|
||||
f = &wx->ring_feature[RING_F_RSS];
|
||||
if (wx->mac.type == wx_mac_sp)
|
||||
if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
|
||||
f->mask = WX_RSS_64Q_MASK;
|
||||
else
|
||||
f->mask = WX_RSS_8Q_MASK;
|
||||
@@ -1853,7 +1853,7 @@ static bool wx_cache_ring_vmdq(struct wx *wx)
|
||||
if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
|
||||
return false;
|
||||
|
||||
if (wx->mac.type == wx_mac_sp) {
|
||||
if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
|
||||
/* start at VMDq register offset for SR-IOV enabled setups */
|
||||
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
|
||||
for (i = 0; i < wx->num_rx_queues; i++, reg_idx++) {
|
||||
@@ -2354,10 +2354,10 @@ void wx_configure_vectors(struct wx *wx)
|
||||
|
||||
if (pdev->msix_enabled) {
|
||||
/* Populate MSIX to EITR Select */
|
||||
if (wx->mac.type == wx_mac_sp) {
|
||||
if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
|
||||
if (wx->num_vfs >= 32)
|
||||
eitrsel = BIT(wx->num_vfs % 32) - 1;
|
||||
} else if (wx->mac.type == wx_mac_em) {
|
||||
} else {
|
||||
for (i = 0; i < wx->num_vfs; i++)
|
||||
eitrsel |= BIT(i);
|
||||
}
|
||||
|
||||
@@ -106,7 +106,7 @@ static int __wx_enable_sriov(struct wx *wx, u8 num_vfs)
|
||||
wx->vfinfo[i].xcast_mode = WXVF_XCAST_MODE_NONE;
|
||||
}
|
||||
|
||||
if (wx->mac.type == wx_mac_em) {
|
||||
if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
|
||||
value = WX_CFG_PORT_CTL_NUM_VT_8;
|
||||
} else {
|
||||
if (num_vfs < 32)
|
||||
@@ -599,10 +599,10 @@ static int wx_set_vf_vlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
|
||||
if (VMDQ_P(0) < 32) {
|
||||
bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
|
||||
bits &= ~BIT(VMDQ_P(0));
|
||||
if (wx->mac.type != wx_mac_em)
|
||||
if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
|
||||
bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
|
||||
} else {
|
||||
if (wx->mac.type != wx_mac_em)
|
||||
if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
|
||||
bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
|
||||
bits &= ~BIT(VMDQ_P(0) % 32);
|
||||
bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
|
||||
@@ -848,7 +848,7 @@ void wx_disable_vf_rx_tx(struct wx *wx)
|
||||
{
|
||||
wr32(wx, WX_TDM_VFTE_CLR(0), U32_MAX);
|
||||
wr32(wx, WX_RDM_VFRE_CLR(0), U32_MAX);
|
||||
if (wx->mac.type != wx_mac_em) {
|
||||
if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
|
||||
wr32(wx, WX_TDM_VFTE_CLR(1), U32_MAX);
|
||||
wr32(wx, WX_RDM_VFRE_CLR(1), U32_MAX);
|
||||
}
|
||||
|
||||
@@ -1184,6 +1184,7 @@ struct vf_macvlans {
|
||||
};
|
||||
|
||||
enum wx_pf_flags {
|
||||
WX_FLAG_MULTI_64_FUNC,
|
||||
WX_FLAG_SWFW_RING,
|
||||
WX_FLAG_VMDQ_ENABLED,
|
||||
WX_FLAG_VLAN_PROMISC,
|
||||
|
||||
@@ -318,6 +318,7 @@ static int txgbe_sw_init(struct wx *wx)
|
||||
wx->configure_fdir = txgbe_configure_fdir;
|
||||
|
||||
set_bit(WX_FLAG_RSC_CAPABLE, wx->flags);
|
||||
set_bit(WX_FLAG_MULTI_64_FUNC, wx->flags);
|
||||
|
||||
/* enable itr by default in dynamic mode */
|
||||
wx->rx_itr_setting = 1;
|
||||
|
||||
Reference in New Issue
Block a user