idpf: use napi's irq affinity

Delete the driver's CPU affinity info and use the core's napi config
instead.

Signed-off-by: Ahmed Zaki <ahmed.zaki@intel.com>
Link: https://patch.msgid.link/20250224232228.990783-6-ahmed.zaki@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Author:     Ahmed Zaki <ahmed.zaki@intel.com>
Date:       2025-02-24 16:22:26 -07:00
Committer:  Jakub Kicinski
Parent:     4063af2967
Commit:     deab38f8f0

3 changed files with 10 additions and 19 deletions
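
For context, the pattern the patch moves to looks roughly like the sketch
below. The netif_set_affinity_auto(), netif_napi_add_config() and
netif_napi_set_irq() calls are the ones used in the hunks that follow; the
demo_* names and the simplified driver context are hypothetical, so treat
this as an illustration rather than the driver's actual code.

#include <linux/netdevice.h>

/* Hypothetical per-vector context; idpf uses struct idpf_q_vector. */
struct demo_q_vector {
	struct napi_struct napi;
};

/* At netdev configuration time (cf. idpf_cfg_netdev()): opt in to letting
 * the core place and track IRQ affinity via each NAPI's config entry.
 */
static void demo_cfg_netdev(struct net_device *netdev)
{
	netif_set_affinity_auto(netdev);
}

/* Per queue vector (cf. idpf_vport_intr_napi_add_all()): register the NAPI
 * against config slot v_idx and tell the core which IRQ backs it. The core
 * then applies the affinity mask, so the driver no longer needs to call
 * irq_set_affinity_hint() or keep a private cpumask per vector.
 */
static void demo_napi_add(struct net_device *netdev,
			  struct demo_q_vector *qv,
			  int (*poll)(struct napi_struct *, int),
			  u16 v_idx, int irq_num)
{
	netif_napi_add_config(netdev, &qv->napi, poll, v_idx);
	netif_napi_set_irq(&qv->napi, irq_num);
}

The idea is that per-vector affinity now lives in the core's persistent NAPI
config keyed by the vector index, instead of in driver-private cpumasks.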

drivers/net/ethernet/intel/idpf/idpf_lib.c

@@ -814,6 +814,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
 	netdev->hw_features |= dflt_features | offloads;
 	netdev->hw_enc_features |= dflt_features | offloads;
 	idpf_set_ethtool_ops(netdev);
+	netif_set_affinity_auto(netdev);
 	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
 
 	/* carrier off on init to avoid Tx hangs */
drivers/net/ethernet/intel/idpf/idpf_txrx.c

@@ -3551,8 +3551,6 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
 		q_vector->tx = NULL;
 		kfree(q_vector->rx);
 		q_vector->rx = NULL;
-
-		free_cpumask_var(q_vector->affinity_mask);
 	}
 
 	kfree(vport->q_vectors);
@@ -3579,8 +3577,6 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
 		vidx = vport->q_vector_idxs[vector];
 		irq_num = adapter->msix_entries[vidx].vector;
 
-		/* clear the affinity_mask in the IRQ descriptor */
-		irq_set_affinity_hint(irq_num, NULL);
 		kfree(free_irq(irq_num, q_vector));
 	}
 }
@@ -3768,8 +3764,6 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
"Request_irq failed, error: %d\n", err);
goto free_q_irqs;
}
/* assign the mask for this irq */
irq_set_affinity_hint(irq_num, q_vector->affinity_mask);
}
return 0;
@@ -4181,7 +4175,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
 static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
 {
 	int (*napi_poll)(struct napi_struct *napi, int budget);
-	u16 v_idx;
+	u16 v_idx, qv_idx;
+	int irq_num;
 
 	if (idpf_is_queue_model_split(vport->txq_model))
 		napi_poll = idpf_vport_splitq_napi_poll;
@@ -4190,12 +4185,12 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
 		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+		qv_idx = vport->q_vector_idxs[v_idx];
+		irq_num = vport->adapter->msix_entries[qv_idx].vector;
 
-		netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);
-
-		/* only set affinity_mask if the CPU is online */
-		if (cpu_online(v_idx))
-			cpumask_set_cpu(v_idx, q_vector->affinity_mask);
+		netif_napi_add_config(vport->netdev, &q_vector->napi,
+				      napi_poll, v_idx);
+		netif_napi_set_irq(&q_vector->napi, irq_num);
 	}
 }
@@ -4239,9 +4234,6 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
 		q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
 		q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
 
-		if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
-			goto error;
-
 		q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
 				       GFP_KERNEL);
 		if (!q_vector->tx)

drivers/net/ethernet/intel/idpf/idpf_txrx.h

@@ -382,7 +382,6 @@ struct idpf_intr_reg {
  * @rx_intr_mode: Dynamic ITR or not
  * @rx_itr_idx: RX ITR index
  * @v_idx: Vector index
- * @affinity_mask: CPU affinity mask
  */
 struct idpf_q_vector {
 	__cacheline_group_begin_aligned(read_mostly);
@@ -419,13 +418,12 @@ struct idpf_q_vector {
 	__cacheline_group_begin_aligned(cold);
 	u16 v_idx;
 
-	cpumask_var_t affinity_mask;
 	__cacheline_group_end_aligned(cold);
 };
 
 libeth_cacheline_set_assert(struct idpf_q_vector, 120,
 			    24 + sizeof(struct napi_struct) +
 			    2 * sizeof(struct dim),
-			    8 + sizeof(cpumask_var_t));
+			    8);
 
 struct idpf_rx_queue_stats {
 	u64_stats_t packets;
@@ -921,7 +919,7 @@ static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
 	if (!q_vector)
 		return NUMA_NO_NODE;
 
-	cpu = cpumask_first(q_vector->affinity_mask);
+	cpu = cpumask_first(&q_vector->napi.config->affinity_mask);
 
 	return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
 }
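
Usage note: with the driver-private cpumask gone, the preferred memory node
is now derived from the NAPI's core-managed config, as the hunk above shows.
A minimal standalone sketch of that lookup follows; demo_napi_to_mem() is a
hypothetical helper mirroring idpf_q_vector_to_mem(), and it assumes the NAPI
was registered with netif_napi_add_config() so napi->config is valid.

#include <linux/netdevice.h>
#include <linux/topology.h>

/* Pick the memory node of the first CPU in the NAPI's core-managed
 * affinity mask; fall back to NUMA_NO_NODE when the mask is empty.
 */
static int demo_napi_to_mem(const struct napi_struct *napi)
{
	unsigned int cpu = cpumask_first(&napi->config->affinity_mask);

	return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
}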