ice: Add support for persistent NAPI config
Use netif_napi_add_config to assign persistent per-NAPI config when
initializing NAPIs. This preserves NAPI config settings when queue
counts are adjusted.
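
The driver-side change amounts to swapping the registration helper and
passing a stable per-vector index. A minimal sketch of the pattern
(mirroring the diff at the end of this message, with v_idx as the
persistent config slot) looks like:

    /* Before: per-NAPI settings live only as long as this NAPI instance. */
    netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);

    /* After: settings are kept in the netdev's per-index config slot
     * (here v_idx) and are re-applied when a NAPI is later re-added with
     * the same index, e.g. after an ethtool -L queue count change.
     */
    netif_napi_add_config(vsi->netdev, &q_vector->napi, ice_napi_poll, v_idx);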
Tested with an E810-2CQDA2 NIC.
Begin by setting the queue count to 4:
$ sudo ethtool -L eth4 combined 4
Check the queue settings:
$ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
--dump napi-get --json='{"ifindex": 4}'
[{'defer-hard-irqs': 0,
'gro-flush-timeout': 0,
'id': 8452,
'ifindex': 4,
'irq': 2782},
{'defer-hard-irqs': 0,
'gro-flush-timeout': 0,
'id': 8451,
'ifindex': 4,
'irq': 2781},
{'defer-hard-irqs': 0,
'gro-flush-timeout': 0,
'id': 8450,
'ifindex': 4,
'irq': 2780},
{'defer-hard-irqs': 0,
'gro-flush-timeout': 0,
'id': 8449,
'ifindex': 4,
'irq': 2779}]
Now, set the queue with NAPI ID 8451 to have a gro-flush-timeout of
1111:
$ sudo ./tools/net/ynl/cli.py \
--spec Documentation/netlink/specs/netdev.yaml \
--do napi-set --json='{"id": 8451, "gro-flush-timeout": 1111}'
None
Check that the setting took effect:
$ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
--dump napi-get --json='{"ifindex": 4}'
[{'defer-hard-irqs': 0,
'gro-flush-timeout': 0,
'id': 8452,
'ifindex': 4,
'irq': 2782},
{'defer-hard-irqs': 0,
'gro-flush-timeout': 1111,
'id': 8451,
'ifindex': 4,
'irq': 2781},
{'defer-hard-irqs': 0,
'gro-flush-timeout': 0,
'id': 8450,
'ifindex': 4,
'irq': 2780},
{'defer-hard-irqs': 0,
'gro-flush-timeout': 0,
'id': 8449,
'ifindex': 4,
'irq': 2779}]
Now reduce the queue count to 2, which would destroy the queue with NAPI
ID 8451:
$ sudo ethtool -L eth4 combined 2
Check the queue settings, noting that NAPI ID 8451 is gone:
$ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
--dump napi-get --json='{"ifindex": 4}'
[{'defer-hard-irqs': 0,
'gro-flush-timeout': 0,
'id': 8450,
'ifindex': 4,
'irq': 2780},
{'defer-hard-irqs': 0,
'gro-flush-timeout': 0,
'id': 8449,
'ifindex': 4,
'irq': 2779}]
Now, increase the number of queues back to 4:
$ sudo ethtool -L eth4 combined 4
Dump the settings again, expecting the same NAPI IDs as above, with NAPI
ID 8451 still showing its gro-flush-timeout of 1111:
$ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
--dump napi-get --json='{"ifindex": 4}'
[{'defer-hard-irqs': 0,
'gro-flush-timeout': 0,
'id': 8452,
'ifindex': 4,
'irq': 2782},
{'defer-hard-irqs': 0,
'gro-flush-timeout': 1111,
'id': 8451,
'ifindex': 4,
'irq': 2781},
{'defer-hard-irqs': 0,
'gro-flush-timeout': 0,
'id': 8450,
'ifindex': 4,
'irq': 2780},
{'defer-hard-irqs': 0,
'gro-flush-timeout': 0,
'id': 8449,
'ifindex': 4,
'irq': 2779}]
Signed-off-by: Joe Damato <jdamato@fastly.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
@@ -156,7 +156,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
 	 * handler here (i.e. resume, reset/rebuild, etc.)
 	 */
 	if (vsi->netdev)
-		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);
+		netif_napi_add_config(vsi->netdev, &q_vector->napi,
+				      ice_napi_poll, v_idx);
 
 out:
 	/* tie q_vector and VSI together */

@@ -2777,8 +2777,10 @@ void ice_napi_add(struct ice_vsi *vsi)
 		return;
 
 	ice_for_each_q_vector(vsi, v_idx)
-		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
-			       ice_napi_poll);
+		netif_napi_add_config(vsi->netdev,
+				      &vsi->q_vectors[v_idx]->napi,
+				      ice_napi_poll,
+				      v_idx);
 }
 
 /**