Merge branch 'net-convert-exit_batch_rtnl-to-exit_rtnl'

Kuniyuki Iwashima says:

====================
net: Convert ->exit_batch_rtnl() to ->exit_rtnl().

While converting nexthop to per-netns RTNL, there are two blockers
to using rtnl_net_dereference(), flush_all_nexthops() and
__unregister_nexthop_notifier(), both of which are called from
->exit_batch_rtnl().

Instead of spreading __rtnl_net_lock() over each ->exit_batch_rtnl(),
we should convert all ->exit_batch_rtnl() to per-net ->exit_rtnl() and
run it under __rtnl_net_lock() because all ->exit_batch_rtnl() functions
do not have anything to factor out for batching.

Patch 1 & 2 factorise the undo mechanism against ->init() into a single
function, and Patch 3 adds ->exit_rtnl().

Patch 4 ~ 13 convert all ->exit_batch_rtnl() users.

Patch 14 removes ->exit_batch_rtnl().

Later, we can convert pfcp and ppp to use ->exit_rtnl().

v1: https://lore.kernel.org/all/20250410022004.8668-1-kuniyu@amazon.com/
====================

Link: https://patch.msgid.link/20250411205258.63164-1-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2025-04-14 17:09:13 -07:00
19 changed files with 213 additions and 300 deletions

View File

@@ -777,27 +777,19 @@ static __net_init int bareudp_init_net(struct net *net)
return 0;
}
static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
static void __net_exit bareudp_exit_rtnl_net(struct net *net,
struct list_head *dev_kill_list)
{
struct bareudp_net *bn = net_generic(net, bareudp_net_id);
struct bareudp_dev *bareudp, *next;
list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
unregister_netdevice_queue(bareudp->dev, head);
}
static void __net_exit bareudp_exit_batch_rtnl(struct list_head *net_list,
struct list_head *dev_kill_list)
{
struct net *net;
list_for_each_entry(net, net_list, exit_list)
bareudp_destroy_tunnels(net, dev_kill_list);
bareudp_dellink(bareudp->dev, dev_kill_list);
}
static struct pernet_operations bareudp_net_ops = {
.init = bareudp_init_net,
.exit_batch_rtnl = bareudp_exit_batch_rtnl,
.exit_rtnl = bareudp_exit_rtnl_net,
.id = &bareudp_net_id,
.size = sizeof(struct bareudp_net),
};

View File

@@ -6558,7 +6558,7 @@ static int __net_init bond_net_init(struct net *net)
/* According to commit 69b0216ac255 ("bonding: fix bonding_masters
* race condition in bond unloading") we need to remove sysfs files
* before we remove our devices (done later in bond_net_exit_batch_rtnl())
* before we remove our devices (done later in bond_net_exit_rtnl())
*/
static void __net_exit bond_net_pre_exit(struct net *net)
{
@@ -6567,25 +6567,20 @@ static void __net_exit bond_net_pre_exit(struct net *net)
bond_destroy_sysfs(bn);
}
static void __net_exit bond_net_exit_batch_rtnl(struct list_head *net_list,
struct list_head *dev_kill_list)
static void __net_exit bond_net_exit_rtnl(struct net *net,
struct list_head *dev_kill_list)
{
struct bond_net *bn;
struct net *net;
struct bond_net *bn = net_generic(net, bond_net_id);
struct bonding *bond, *tmp_bond;
/* Kill off any bonds created after unregistering bond rtnl ops */
list_for_each_entry(net, net_list, exit_list) {
struct bonding *bond, *tmp_bond;
bn = net_generic(net, bond_net_id);
list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
unregister_netdevice_queue(bond->dev, dev_kill_list);
}
list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
unregister_netdevice_queue(bond->dev, dev_kill_list);
}
/* According to commit 23fa5c2caae0 ("bonding: destroy proc directory
* only after all bonds are gone") bond_destroy_proc_dir() is called
* after bond_net_exit_batch_rtnl() has completed.
* after bond_net_exit_rtnl() has completed.
*/
static void __net_exit bond_net_exit_batch(struct list_head *net_list)
{
@@ -6601,7 +6596,7 @@ static void __net_exit bond_net_exit_batch(struct list_head *net_list)
static struct pernet_operations bond_net_ops = {
.init = bond_net_init,
.pre_exit = bond_net_pre_exit,
.exit_batch_rtnl = bond_net_exit_batch_rtnl,
.exit_rtnl = bond_net_exit_rtnl,
.exit_batch = bond_net_exit_batch,
.id = &bond_net_id,
.size = sizeof(struct bond_net),

View File

@@ -1946,22 +1946,14 @@ static __net_init int geneve_init_net(struct net *net)
return 0;
}
static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
static void __net_exit geneve_exit_rtnl_net(struct net *net,
struct list_head *dev_to_kill)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *geneve, *next;
list_for_each_entry_safe(geneve, next, &gn->geneve_list, next)
geneve_dellink(geneve->dev, head);
}
static void __net_exit geneve_exit_batch_rtnl(struct list_head *net_list,
struct list_head *dev_to_kill)
{
struct net *net;
list_for_each_entry(net, net_list, exit_list)
geneve_destroy_tunnels(net, dev_to_kill);
geneve_dellink(geneve->dev, dev_to_kill);
}
static void __net_exit geneve_exit_net(struct net *net)
@@ -1973,7 +1965,7 @@ static void __net_exit geneve_exit_net(struct net *net)
static struct pernet_operations geneve_net_ops = {
.init = geneve_init_net,
.exit_batch_rtnl = geneve_exit_batch_rtnl,
.exit_rtnl = geneve_exit_rtnl_net,
.exit = geneve_exit_net,
.id = &geneve_net_id,
.size = sizeof(struct geneve_net),

View File

@@ -2475,23 +2475,19 @@ static int __net_init gtp_net_init(struct net *net)
return 0;
}
static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
struct list_head *dev_to_kill)
static void __net_exit gtp_net_exit_rtnl(struct net *net,
struct list_head *dev_to_kill)
{
struct net *net;
struct gtp_net *gn = net_generic(net, gtp_net_id);
struct gtp_dev *gtp, *gtp_next;
list_for_each_entry(net, net_list, exit_list) {
struct gtp_net *gn = net_generic(net, gtp_net_id);
struct gtp_dev *gtp, *gtp_next;
list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
gtp_dellink(gtp->dev, dev_to_kill);
}
list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
gtp_dellink(gtp->dev, dev_to_kill);
}
static struct pernet_operations gtp_net_ops = {
.init = gtp_net_init,
.exit_batch_rtnl = gtp_net_exit_batch_rtnl,
.exit_rtnl = gtp_net_exit_rtnl,
.id = &gtp_net_id,
.size = sizeof(struct gtp_net),
};

View File

@@ -4966,19 +4966,15 @@ static void __net_exit vxlan_destroy_tunnels(struct vxlan_net *vn,
vxlan_dellink(vxlan->dev, dev_to_kill);
}
static void __net_exit vxlan_exit_batch_rtnl(struct list_head *net_list,
struct list_head *dev_to_kill)
static void __net_exit vxlan_exit_rtnl(struct net *net,
struct list_head *dev_to_kill)
{
struct net *net;
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list) {
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
ASSERT_RTNL_NET(net);
__unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
vxlan_destroy_tunnels(vn, dev_to_kill);
}
__unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
vxlan_destroy_tunnels(vn, dev_to_kill);
}
static void __net_exit vxlan_exit_net(struct net *net)
@@ -4992,7 +4988,7 @@ static void __net_exit vxlan_exit_net(struct net *net)
static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net,
.exit_batch_rtnl = vxlan_exit_batch_rtnl,
.exit_rtnl = vxlan_exit_rtnl,
.exit = vxlan_exit_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),

View File

@@ -377,10 +377,9 @@ struct net *ip_tunnel_get_link_net(const struct net_device *dev);
int ip_tunnel_get_iflink(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);
void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
struct rtnl_link_ops *ops,
struct list_head *dev_to_kill);
void ip_tunnel_delete_net(struct net *net, unsigned int id,
struct rtnl_link_ops *ops,
struct list_head *dev_to_kill);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);

View File

@@ -475,8 +475,8 @@ struct pernet_operations {
void (*exit)(struct net *net);
void (*exit_batch)(struct list_head *net_exit_list);
/* Following method is called with RTNL held. */
void (*exit_batch_rtnl)(struct list_head *net_exit_list,
struct list_head *dev_kill_list);
void (*exit_rtnl)(struct net *net,
struct list_head *dev_kill_list);
unsigned int * const id;
const size_t size;
};

View File

@@ -368,21 +368,20 @@ void br_opt_toggle(struct net_bridge *br, enum net_bridge_opts opt, bool on)
clear_bit(opt, &br->options);
}
static void __net_exit br_net_exit_batch_rtnl(struct list_head *net_list,
struct list_head *dev_to_kill)
static void __net_exit br_net_exit_rtnl(struct net *net,
struct list_head *dev_to_kill)
{
struct net_device *dev;
struct net *net;
ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
for_each_netdev(net, dev)
if (netif_is_bridge_master(dev))
br_dev_delete(dev, dev_to_kill);
ASSERT_RTNL_NET(net);
for_each_netdev(net, dev)
if (netif_is_bridge_master(dev))
br_dev_delete(dev, dev_to_kill);
}
static struct pernet_operations br_net_ops = {
.exit_batch_rtnl = br_net_exit_batch_rtnl,
.exit_rtnl = br_net_exit_rtnl,
};
static const struct stp_proto br_stp_proto = {

View File

@@ -163,16 +163,45 @@ static void ops_pre_exit_list(const struct pernet_operations *ops,
}
}
static void ops_exit_rtnl_list(const struct list_head *ops_list,
const struct pernet_operations *ops,
struct list_head *net_exit_list)
{
const struct pernet_operations *saved_ops = ops;
LIST_HEAD(dev_kill_list);
struct net *net;
rtnl_lock();
list_for_each_entry(net, net_exit_list, exit_list) {
__rtnl_net_lock(net);
ops = saved_ops;
list_for_each_entry_continue_reverse(ops, ops_list, list) {
if (ops->exit_rtnl)
ops->exit_rtnl(net, &dev_kill_list);
}
__rtnl_net_unlock(net);
}
unregister_netdevice_many(&dev_kill_list);
rtnl_unlock();
}
static void ops_exit_list(const struct pernet_operations *ops,
struct list_head *net_exit_list)
{
struct net *net;
if (ops->exit) {
struct net *net;
list_for_each_entry(net, net_exit_list, exit_list) {
ops->exit(net);
cond_resched();
}
}
if (ops->exit_batch)
ops->exit_batch(net_exit_list);
}
@@ -188,6 +217,54 @@ static void ops_free_list(const struct pernet_operations *ops,
}
}
static void ops_undo_list(const struct list_head *ops_list,
const struct pernet_operations *ops,
struct list_head *net_exit_list,
bool expedite_rcu, bool hold_rtnl)
{
const struct pernet_operations *saved_ops;
if (!ops)
ops = list_entry(ops_list, typeof(*ops), list);
saved_ops = ops;
list_for_each_entry_continue_reverse(ops, ops_list, list)
ops_pre_exit_list(ops, net_exit_list);
/* Another CPU might be rcu-iterating the list, wait for it.
* This needs to be before calling the exit() notifiers, so the
* rcu_barrier() after ops_undo_list() isn't sufficient alone.
* Also the pre_exit() and exit() methods need this barrier.
*/
if (expedite_rcu)
synchronize_rcu_expedited();
else
synchronize_rcu();
if (hold_rtnl)
ops_exit_rtnl_list(ops_list, saved_ops, net_exit_list);
ops = saved_ops;
list_for_each_entry_continue_reverse(ops, ops_list, list)
ops_exit_list(ops, net_exit_list);
ops = saved_ops;
list_for_each_entry_continue_reverse(ops, ops_list, list)
ops_free_list(ops, net_exit_list);
}
static void ops_undo_single(struct pernet_operations *ops,
struct list_head *net_exit_list)
{
bool hold_rtnl = !!ops->exit_rtnl;
LIST_HEAD(ops_list);
list_add(&ops->list, &ops_list);
ops_undo_list(&ops_list, NULL, net_exit_list, false, hold_rtnl);
list_del(&ops->list);
}
/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
@@ -351,9 +428,8 @@ static __net_init void preinit_net(struct net *net, struct user_namespace *user_
static __net_init int setup_net(struct net *net)
{
/* Must be called with pernet_ops_rwsem held */
const struct pernet_operations *ops, *saved_ops;
const struct pernet_operations *ops;
LIST_HEAD(net_exit_list);
LIST_HEAD(dev_kill_list);
int error = 0;
preempt_disable();
@@ -376,29 +452,7 @@ static __net_init int setup_net(struct net *net)
* for the pernet modules whose init functions did not fail.
*/
list_add(&net->exit_list, &net_exit_list);
saved_ops = ops;
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
ops_pre_exit_list(ops, &net_exit_list);
synchronize_rcu();
ops = saved_ops;
rtnl_lock();
list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
if (ops->exit_batch_rtnl)
ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
}
unregister_netdevice_many(&dev_kill_list);
rtnl_unlock();
ops = saved_ops;
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list);
ops = saved_ops;
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
ops_free_list(ops, &net_exit_list);
ops_undo_list(&pernet_list, ops, &net_exit_list, false, true);
rcu_barrier();
goto out;
}
@@ -594,11 +648,9 @@ struct task_struct *cleanup_net_task;
static void cleanup_net(struct work_struct *work)
{
const struct pernet_operations *ops;
struct net *net, *tmp, *last;
struct llist_node *net_kill_list;
struct net *net, *tmp, *last;
LIST_HEAD(net_exit_list);
LIST_HEAD(dev_kill_list);
cleanup_net_task = current;
@@ -629,33 +681,7 @@ static void cleanup_net(struct work_struct *work)
list_add_tail(&net->exit_list, &net_exit_list);
}
/* Run all of the network namespace pre_exit methods */
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_pre_exit_list(ops, &net_exit_list);
/*
* Another CPU might be rcu-iterating the list, wait for it.
* This needs to be before calling the exit() notifiers, so
* the rcu_barrier() below isn't sufficient alone.
* Also the pre_exit() and exit() methods need this barrier.
*/
synchronize_rcu_expedited();
rtnl_lock();
list_for_each_entry_reverse(ops, &pernet_list, list) {
if (ops->exit_batch_rtnl)
ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
}
unregister_netdevice_many(&dev_kill_list);
rtnl_unlock();
/* Run all of the network namespace exit methods */
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list);
/* Free the net generic variables */
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_free_list(ops, &net_exit_list);
ops_undo_list(&pernet_list, NULL, &net_exit_list, true, true);
up_read(&pernet_ops_rwsem);
@@ -1239,31 +1265,13 @@ void __init net_ns_init(void)
rtnl_register_many(net_ns_rtnl_msg_handlers);
}
static void free_exit_list(struct pernet_operations *ops, struct list_head *net_exit_list)
{
ops_pre_exit_list(ops, net_exit_list);
synchronize_rcu();
if (ops->exit_batch_rtnl) {
LIST_HEAD(dev_kill_list);
rtnl_lock();
ops->exit_batch_rtnl(net_exit_list, &dev_kill_list);
unregister_netdevice_many(&dev_kill_list);
rtnl_unlock();
}
ops_exit_list(ops, net_exit_list);
ops_free_list(ops, net_exit_list);
}
#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
LIST_HEAD(net_exit_list);
struct net *net;
int error;
LIST_HEAD(net_exit_list);
list_add_tail(&ops->list, list);
if (ops->init || ops->id) {
@@ -1282,21 +1290,21 @@ static int __register_pernet_operations(struct list_head *list,
out_undo:
/* If I have an error cleanup all namespaces I initialized */
list_del(&ops->list);
free_exit_list(ops, &net_exit_list);
ops_undo_single(ops, &net_exit_list);
return error;
}
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
struct net *net;
LIST_HEAD(net_exit_list);
struct net *net;
list_del(&ops->list);
/* See comment in __register_pernet_operations() */
for_each_net(net)
list_add_tail(&net->exit_list, &net_exit_list);
free_exit_list(ops, &net_exit_list);
list_del(&ops->list);
ops_undo_single(ops, &net_exit_list);
}
#else
@@ -1304,22 +1312,23 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
static int __register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
if (!init_net_initialized) {
list_add_tail(&ops->list, list);
list_add_tail(&ops->list, list);
if (!init_net_initialized)
return 0;
}
return ops_init(ops, &init_net);
}
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
if (!init_net_initialized) {
list_del(&ops->list);
} else {
list_del(&ops->list);
if (init_net_initialized) {
LIST_HEAD(net_exit_list);
list_add(&init_net.exit_list, &net_exit_list);
free_exit_list(ops, &net_exit_list);
ops_undo_single(ops, &net_exit_list);
}
}

View File

@@ -1066,16 +1066,15 @@ static int __net_init ipgre_init_net(struct net *net)
return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}
static void __net_exit ipgre_exit_batch_rtnl(struct list_head *list_net,
struct list_head *dev_to_kill)
static void __net_exit ipgre_exit_rtnl(struct net *net,
struct list_head *dev_to_kill)
{
ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops,
dev_to_kill);
ip_tunnel_delete_net(net, ipgre_net_id, &ipgre_link_ops, dev_to_kill);
}
static struct pernet_operations ipgre_net_ops = {
.init = ipgre_init_net,
.exit_batch_rtnl = ipgre_exit_batch_rtnl,
.exit_rtnl = ipgre_exit_rtnl,
.id = &ipgre_net_id,
.size = sizeof(struct ip_tunnel_net),
};
@@ -1752,16 +1751,15 @@ static int __net_init ipgre_tap_init_net(struct net *net)
return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}
static void __net_exit ipgre_tap_exit_batch_rtnl(struct list_head *list_net,
struct list_head *dev_to_kill)
static void __net_exit ipgre_tap_exit_rtnl(struct net *net,
struct list_head *dev_to_kill)
{
ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops,
dev_to_kill);
ip_tunnel_delete_net(net, gre_tap_net_id, &ipgre_tap_ops, dev_to_kill);
}
static struct pernet_operations ipgre_tap_net_ops = {
.init = ipgre_tap_init_net,
.exit_batch_rtnl = ipgre_tap_exit_batch_rtnl,
.exit_rtnl = ipgre_tap_exit_rtnl,
.id = &gre_tap_net_id,
.size = sizeof(struct ip_tunnel_net),
};
@@ -1772,16 +1770,15 @@ static int __net_init erspan_init_net(struct net *net)
&erspan_link_ops, "erspan0");
}
static void __net_exit erspan_exit_batch_rtnl(struct list_head *net_list,
struct list_head *dev_to_kill)
static void __net_exit erspan_exit_rtnl(struct net *net,
struct list_head *dev_to_kill)
{
ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops,
dev_to_kill);
ip_tunnel_delete_net(net, erspan_net_id, &erspan_link_ops, dev_to_kill);
}
static struct pernet_operations erspan_net_ops = {
.init = erspan_init_net,
.exit_batch_rtnl = erspan_exit_batch_rtnl,
.exit_rtnl = erspan_exit_rtnl,
.id = &erspan_net_id,
.size = sizeof(struct ip_tunnel_net),
};

View File

@@ -1174,13 +1174,16 @@ int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
static void ip_tunnel_destroy(struct net *net, struct ip_tunnel_net *itn,
struct list_head *head,
struct rtnl_link_ops *ops)
void ip_tunnel_delete_net(struct net *net, unsigned int id,
struct rtnl_link_ops *ops,
struct list_head *head)
{
struct ip_tunnel_net *itn = net_generic(net, id);
struct net_device *dev, *aux;
int h;
ASSERT_RTNL_NET(net);
for_each_netdev_safe(net, dev, aux)
if (dev->rtnl_link_ops == ops)
unregister_netdevice_queue(dev, head);
@@ -1198,21 +1201,7 @@ static void ip_tunnel_destroy(struct net *net, struct ip_tunnel_net *itn,
unregister_netdevice_queue(t->dev, head);
}
}
void ip_tunnel_delete_nets(struct list_head *net_list, unsigned int id,
struct rtnl_link_ops *ops,
struct list_head *dev_to_kill)
{
struct ip_tunnel_net *itn;
struct net *net;
ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list) {
itn = net_generic(net, id);
ip_tunnel_destroy(net, itn, dev_to_kill, ops);
}
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets);
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);
int ip_tunnel_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct ip_tunnel_parm_kern *p,

View File

@@ -523,16 +523,15 @@ static int __net_init vti_init_net(struct net *net)
return 0;
}
static void __net_exit vti_exit_batch_rtnl(struct list_head *list_net,
struct list_head *dev_to_kill)
static void __net_exit vti_exit_rtnl(struct net *net,
struct list_head *dev_to_kill)
{
ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops,
dev_to_kill);
ip_tunnel_delete_net(net, vti_net_id, &vti_link_ops, dev_to_kill);
}
static struct pernet_operations vti_net_ops = {
.init = vti_init_net,
.exit_batch_rtnl = vti_exit_batch_rtnl,
.exit_rtnl = vti_exit_rtnl,
.id = &vti_net_id,
.size = sizeof(struct ip_tunnel_net),
};

View File

@@ -604,16 +604,15 @@ static int __net_init ipip_init_net(struct net *net)
return ip_tunnel_init_net(net, ipip_net_id, &ipip_link_ops, "tunl0");
}
static void __net_exit ipip_exit_batch_rtnl(struct list_head *list_net,
struct list_head *dev_to_kill)
static void __net_exit ipip_exit_rtnl(struct net *net,
struct list_head *dev_to_kill)
{
ip_tunnel_delete_nets(list_net, ipip_net_id, &ipip_link_ops,
dev_to_kill);
ip_tunnel_delete_net(net, ipip_net_id, &ipip_link_ops, dev_to_kill);
}
static struct pernet_operations ipip_net_ops = {
.init = ipip_init_net,
.exit_batch_rtnl = ipip_exit_batch_rtnl,
.exit_rtnl = ipip_exit_rtnl,
.id = &ipip_net_id,
.size = sizeof(struct ip_tunnel_net),
};

View File

@@ -4040,14 +4040,11 @@ void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
}
EXPORT_SYMBOL(nexthop_res_grp_activity_update);
static void __net_exit nexthop_net_exit_batch_rtnl(struct list_head *net_list,
struct list_head *dev_to_kill)
static void __net_exit nexthop_net_exit_rtnl(struct net *net,
struct list_head *dev_to_kill)
{
struct net *net;
ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
flush_all_nexthops(net);
ASSERT_RTNL_NET(net);
flush_all_nexthops(net);
}
static void __net_exit nexthop_net_exit(struct net *net)
@@ -4072,7 +4069,7 @@ static int __net_init nexthop_net_init(struct net *net)
static struct pernet_operations nexthop_net_ops = {
.init = nexthop_net_init,
.exit = nexthop_net_exit,
.exit_batch_rtnl = nexthop_net_exit_batch_rtnl,
.exit_rtnl = nexthop_net_exit_rtnl,
};
static const struct rtnl_msg_handler nexthop_rtnl_msg_handlers[] __initconst = {

View File

@@ -1570,7 +1570,7 @@ static struct inet6_protocol ip6gre_protocol __read_mostly = {
.flags = INET6_PROTO_FINAL,
};
static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
static void __net_exit ip6gre_exit_rtnl_net(struct net *net, struct list_head *head)
{
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
struct net_device *dev, *aux;
@@ -1587,16 +1587,16 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
for (h = 0; h < IP6_GRE_HASH_SIZE; h++) {
struct ip6_tnl *t;
t = rtnl_dereference(ign->tunnels[prio][h]);
t = rtnl_net_dereference(net, ign->tunnels[prio][h]);
while (t) {
/* If dev is in the same netns, it has already
* been added to the list by the previous loop.
*/
if (!net_eq(dev_net(t->dev), net))
unregister_netdevice_queue(t->dev,
head);
t = rtnl_dereference(t->next);
unregister_netdevice_queue(t->dev, head);
t = rtnl_net_dereference(net, t->next);
}
}
}
@@ -1640,19 +1640,9 @@ static int __net_init ip6gre_init_net(struct net *net)
return err;
}
static void __net_exit ip6gre_exit_batch_rtnl(struct list_head *net_list,
struct list_head *dev_to_kill)
{
struct net *net;
ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
ip6gre_destroy_tunnels(net, dev_to_kill);
}
static struct pernet_operations ip6gre_net_ops = {
.init = ip6gre_init_net,
.exit_batch_rtnl = ip6gre_exit_batch_rtnl,
.exit_rtnl = ip6gre_exit_rtnl_net,
.id = &ip6gre_net_id,
.size = sizeof(struct ip6gre_net),
};

View File

@@ -2210,7 +2210,7 @@ static struct xfrm6_tunnel mplsip6_handler __read_mostly = {
.priority = 1,
};
static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head *list)
static void __net_exit ip6_tnl_exit_rtnl_net(struct net *net, struct list_head *list)
{
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
struct net_device *dev, *aux;
@@ -2222,25 +2222,27 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head
unregister_netdevice_queue(dev, list);
for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
t = rtnl_dereference(ip6n->tnls_r_l[h]);
t = rtnl_net_dereference(net, ip6n->tnls_r_l[h]);
while (t) {
/* If dev is in the same netns, it has already
* been added to the list by the previous loop.
*/
if (!net_eq(dev_net(t->dev), net))
unregister_netdevice_queue(t->dev, list);
t = rtnl_dereference(t->next);
t = rtnl_net_dereference(net, t->next);
}
}
t = rtnl_dereference(ip6n->tnls_wc[0]);
t = rtnl_net_dereference(net, ip6n->tnls_wc[0]);
while (t) {
/* If dev is in the same netns, it has already
* been added to the list by the previous loop.
*/
if (!net_eq(dev_net(t->dev), net))
unregister_netdevice_queue(t->dev, list);
t = rtnl_dereference(t->next);
t = rtnl_net_dereference(net, t->next);
}
}
@@ -2287,19 +2289,9 @@ static int __net_init ip6_tnl_init_net(struct net *net)
return err;
}
static void __net_exit ip6_tnl_exit_batch_rtnl(struct list_head *net_list,
struct list_head *dev_to_kill)
{
struct net *net;
ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
ip6_tnl_destroy_tunnels(net, dev_to_kill);
}
static struct pernet_operations ip6_tnl_net_ops = {
.init = ip6_tnl_init_net,
.exit_batch_rtnl = ip6_tnl_exit_batch_rtnl,
.exit_rtnl = ip6_tnl_exit_rtnl_net,
.id = &ip6_tnl_net_id,
.size = sizeof(struct ip6_tnl_net),
};

View File

@@ -1112,21 +1112,21 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = {
.get_link_net = ip6_tnl_get_link_net,
};
static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n,
struct list_head *list)
static void __net_exit vti6_exit_rtnl_net(struct net *net, struct list_head *list)
{
int h;
struct vti6_net *ip6n = net_generic(net, vti6_net_id);
struct ip6_tnl *t;
int h;
for (h = 0; h < IP6_VTI_HASH_SIZE; h++) {
t = rtnl_dereference(ip6n->tnls_r_l[h]);
t = rtnl_net_dereference(net, ip6n->tnls_r_l[h]);
while (t) {
unregister_netdevice_queue(t->dev, list);
t = rtnl_dereference(t->next);
t = rtnl_net_dereference(net, t->next);
}
}
t = rtnl_dereference(ip6n->tnls_wc[0]);
t = rtnl_net_dereference(net, ip6n->tnls_wc[0]);
if (t)
unregister_netdevice_queue(t->dev, list);
}
@@ -1170,22 +1170,9 @@ static int __net_init vti6_init_net(struct net *net)
return err;
}
static void __net_exit vti6_exit_batch_rtnl(struct list_head *net_list,
struct list_head *dev_to_kill)
{
struct vti6_net *ip6n;
struct net *net;
ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list) {
ip6n = net_generic(net, vti6_net_id);
vti6_destroy_tunnels(ip6n, dev_to_kill);
}
}
static struct pernet_operations vti6_net_ops = {
.init = vti6_init_net,
.exit_batch_rtnl = vti6_exit_batch_rtnl,
.exit_rtnl = vti6_exit_rtnl_net,
.id = &vti6_net_id,
.size = sizeof(struct vti6_net),
};

View File

@@ -1804,8 +1804,7 @@ static struct xfrm_tunnel mplsip_handler __read_mostly = {
};
#endif
static void __net_exit sit_destroy_tunnels(struct net *net,
struct list_head *head)
static void __net_exit sit_exit_rtnl_net(struct net *net, struct list_head *head)
{
struct sit_net *sitn = net_generic(net, sit_net_id);
struct net_device *dev, *aux;
@@ -1820,15 +1819,15 @@ static void __net_exit sit_destroy_tunnels(struct net *net,
for (h = 0; h < (prio ? IP6_SIT_HASH_SIZE : 1); h++) {
struct ip_tunnel *t;
t = rtnl_dereference(sitn->tunnels[prio][h]);
t = rtnl_net_dereference(net, sitn->tunnels[prio][h]);
while (t) {
/* If dev is in the same netns, it has already
* been added to the list by the previous loop.
*/
if (!net_eq(dev_net(t->dev), net))
unregister_netdevice_queue(t->dev,
head);
t = rtnl_dereference(t->next);
unregister_netdevice_queue(t->dev, head);
t = rtnl_net_dereference(net, t->next);
}
}
}
@@ -1881,19 +1880,9 @@ static int __net_init sit_init_net(struct net *net)
return err;
}
static void __net_exit sit_exit_batch_rtnl(struct list_head *net_list,
struct list_head *dev_to_kill)
{
struct net *net;
ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
sit_destroy_tunnels(net, dev_to_kill);
}
static struct pernet_operations sit_net_ops = {
.init = sit_init_net,
.exit_batch_rtnl = sit_exit_batch_rtnl,
.exit_rtnl = sit_exit_rtnl_net,
.id = &sit_net_id,
.size = sizeof(struct sit_net),
};

View File

@@ -952,32 +952,28 @@ static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
.get_link_net = xfrmi_get_link_net,
};
static void __net_exit xfrmi_exit_batch_rtnl(struct list_head *net_exit_list,
struct list_head *dev_to_kill)
static void __net_exit xfrmi_exit_rtnl(struct net *net,
struct list_head *dev_to_kill)
{
struct net *net;
struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
struct xfrm_if __rcu **xip;
struct xfrm_if *xi;
int i;
ASSERT_RTNL();
list_for_each_entry(net, net_exit_list, exit_list) {
struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
struct xfrm_if __rcu **xip;
struct xfrm_if *xi;
int i;
for (i = 0; i < XFRMI_HASH_SIZE; i++) {
for (xip = &xfrmn->xfrmi[i];
(xi = rtnl_dereference(*xip)) != NULL;
xip = &xi->next)
unregister_netdevice_queue(xi->dev, dev_to_kill);
}
xi = rtnl_dereference(xfrmn->collect_md_xfrmi);
if (xi)
for (i = 0; i < XFRMI_HASH_SIZE; i++) {
for (xip = &xfrmn->xfrmi[i];
(xi = rtnl_net_dereference(net, *xip)) != NULL;
xip = &xi->next)
unregister_netdevice_queue(xi->dev, dev_to_kill);
}
xi = rtnl_net_dereference(net, xfrmn->collect_md_xfrmi);
if (xi)
unregister_netdevice_queue(xi->dev, dev_to_kill);
}
static struct pernet_operations xfrmi_net_ops = {
.exit_batch_rtnl = xfrmi_exit_batch_rtnl,
.exit_rtnl = xfrmi_exit_rtnl,
.id = &xfrmi_net_id,
.size = sizeof(struct xfrmi_net),
};