mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-15 23:41:35 -04:00
Merge tag 'nf-26-05-08' of git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf
Pablo Neira Ayuso says:
====================
Netfilter fixes for net
The following batch contains Netfilter fixes for net:
1) Allow initial x_tables table replacement without emitting an audit
log message. Delay the register message until after hooks are wired up
to avoid unnecessary unregister logs during error unwinding.
2) Fix a NULL dereference by allocating hook ops before adding the
table to the per-netns list. Use `synchronize_rcu()` during error
unwinding to ensure the table stops processing packets before
teardown. Defer audit log register message until all operations
succeed.
3) Refactor xtables to use a single `xt_unregister_table_pre_exit`
function. Eliminate code duplication by centralizing table
unregistration logic within the xtables core. ebtables cannot be
changed due to incompatibility.
4) Unregister xtables templates before module removal. This prevents
a race condition where userspace instantiates a new table after the
pernet unreg removed the current table.
5) Add `xtables_unregister_table_exit` to fully unregister netfilter
tables during module removal. Unlink the table from dying lists,
then free hook operations.
6) Implement a two-stage removal scheme for ebtables following the
x_tables pattern. Assign table->ops while holding the ebt mutex to
prevent exposing partially-filled structures.
7) Fix ebtables module initialization race. Register the template last
in table initialization functions. Prevent table instantiation before
pernet operations are available.
8) Fix a race condition in x_tables module initialization. Ensure
pernet ops are fully set up before exposing the table to userspace.
9) Fix a race condition in ebtables module initialization, similar to
previous patch.
10) Restore propagation of helper to expected connection, this is a
fix-for-recent-fix.
11) Validate that the expectation tuple and mask netlink attributes are
present when adding expectation via nfqueue, this fixes a possible
null-ptr-deref.
12) Fix possible rare memleak in the SIP helper in case helper has been
detached from conntrack entry, from Li Xiasong.
13) Fix refcount leak in nft_ct when creating custom expectation, also
from Li Xiasong.
Patches 1-9 from Florian Westphal.
10) Restore propagation of helper to expected connection, this is a
fix-for-recent-fix.
11) Check that tuple and mask netlink attributes are set when creating an
expectation via nfqueue.
* tag 'nf-26-05-08' of git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf:
netfilter: nft_ct: fix missing expect put in obj eval
netfilter: nf_conntrack_sip: get helper before allocating expectation
netfilter: ctnetlink: check tuple and mask in expectations created via nfqueue
netfilter: nf_conntrack_expect: restore helper propagation via expectation
netfilter: bridge: eb_tables: close module init race
netfilter: x_tables: close dangling table module init race
netfilter: ebtables: close dangling table module init race
netfilter: ebtables: move to two-stage removal scheme
netfilter: x_tables: add and use xtables_unregister_table_exit
netfilter: x_tables: unregister the templates first
netfilter: x_tables: add and use xt_unregister_table_pre_exit
netfilter: x_tables: allocate hook ops while under mutex
netfilter: x_tables: allow initial table replace without emitting audit log message
====================
Link: https://patch.msgid.link/20260507234509.603182-1-pablo@netfilter.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
@@ -305,9 +305,11 @@ struct xt_counters *xt_counters_alloc(unsigned int counters);
|
||||
|
||||
struct xt_table *xt_register_table(struct net *net,
|
||||
const struct xt_table *table,
|
||||
const struct nf_hook_ops *template_ops,
|
||||
struct xt_table_info *bootstrap,
|
||||
struct xt_table_info *newinfo);
|
||||
void *xt_unregister_table(struct xt_table *table);
|
||||
void xt_unregister_table_pre_exit(struct net *net, u8 af, const char *name);
|
||||
struct xt_table *xt_unregister_table_exit(struct net *net, u8 af, const char *name);
|
||||
|
||||
struct xt_table_info *xt_replace_table(struct xt_table *table,
|
||||
unsigned int num_counters,
|
||||
|
||||
@@ -53,7 +53,6 @@ int arpt_register_table(struct net *net, const struct xt_table *table,
|
||||
const struct arpt_replace *repl,
|
||||
const struct nf_hook_ops *ops);
|
||||
void arpt_unregister_table(struct net *net, const char *name);
|
||||
void arpt_unregister_table_pre_exit(struct net *net, const char *name);
|
||||
extern unsigned int arpt_do_table(void *priv, struct sk_buff *skb,
|
||||
const struct nf_hook_state *state);
|
||||
|
||||
|
||||
@@ -26,7 +26,6 @@ int ipt_register_table(struct net *net, const struct xt_table *table,
|
||||
const struct ipt_replace *repl,
|
||||
const struct nf_hook_ops *ops);
|
||||
|
||||
void ipt_unregister_table_pre_exit(struct net *net, const char *name);
|
||||
void ipt_unregister_table_exit(struct net *net, const char *name);
|
||||
|
||||
/* Standard entry. */
|
||||
|
||||
@@ -27,7 +27,6 @@ extern void *ip6t_alloc_initial_table(const struct xt_table *);
|
||||
int ip6t_register_table(struct net *net, const struct xt_table *table,
|
||||
const struct ip6t_replace *repl,
|
||||
const struct nf_hook_ops *ops);
|
||||
void ip6t_unregister_table_pre_exit(struct net *net, const char *name);
|
||||
void ip6t_unregister_table_exit(struct net *net, const char *name);
|
||||
extern unsigned int ip6t_do_table(void *priv, struct sk_buff *skb,
|
||||
const struct nf_hook_state *state);
|
||||
|
||||
@@ -45,9 +45,12 @@ struct nf_conntrack_expect {
|
||||
void (*expectfn)(struct nf_conn *new,
|
||||
struct nf_conntrack_expect *this);
|
||||
|
||||
/* Helper to assign to new connection */
|
||||
/* Helper that created this expectation */
|
||||
struct nf_conntrack_helper __rcu *helper;
|
||||
|
||||
/* Helper to assign to new connection */
|
||||
struct nf_conntrack_helper __rcu *assign_helper;
|
||||
|
||||
/* The conntrack of the master connection */
|
||||
struct nf_conn *master;
|
||||
|
||||
|
||||
@@ -112,24 +112,22 @@ static struct pernet_operations broute_net_ops = {
|
||||
|
||||
static int __init ebtable_broute_init(void)
|
||||
{
|
||||
int ret = ebt_register_template(&broute_table, broute_table_init);
|
||||
int ret = register_pernet_subsys(&broute_net_ops);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = register_pernet_subsys(&broute_net_ops);
|
||||
if (ret) {
|
||||
ebt_unregister_template(&broute_table);
|
||||
return ret;
|
||||
}
|
||||
ret = ebt_register_template(&broute_table, broute_table_init);
|
||||
if (ret)
|
||||
unregister_pernet_subsys(&broute_net_ops);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit ebtable_broute_fini(void)
|
||||
{
|
||||
unregister_pernet_subsys(&broute_net_ops);
|
||||
ebt_unregister_template(&broute_table);
|
||||
unregister_pernet_subsys(&broute_net_ops);
|
||||
}
|
||||
|
||||
module_init(ebtable_broute_init);
|
||||
|
||||
@@ -93,24 +93,22 @@ static struct pernet_operations frame_filter_net_ops = {
|
||||
|
||||
static int __init ebtable_filter_init(void)
|
||||
{
|
||||
int ret = ebt_register_template(&frame_filter, frame_filter_table_init);
|
||||
int ret = register_pernet_subsys(&frame_filter_net_ops);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = register_pernet_subsys(&frame_filter_net_ops);
|
||||
if (ret) {
|
||||
ebt_unregister_template(&frame_filter);
|
||||
return ret;
|
||||
}
|
||||
ret = ebt_register_template(&frame_filter, frame_filter_table_init);
|
||||
if (ret)
|
||||
unregister_pernet_subsys(&frame_filter_net_ops);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit ebtable_filter_fini(void)
|
||||
{
|
||||
unregister_pernet_subsys(&frame_filter_net_ops);
|
||||
ebt_unregister_template(&frame_filter);
|
||||
unregister_pernet_subsys(&frame_filter_net_ops);
|
||||
}
|
||||
|
||||
module_init(ebtable_filter_init);
|
||||
|
||||
@@ -93,24 +93,22 @@ static struct pernet_operations frame_nat_net_ops = {
|
||||
|
||||
static int __init ebtable_nat_init(void)
|
||||
{
|
||||
int ret = ebt_register_template(&frame_nat, frame_nat_table_init);
|
||||
int ret = register_pernet_subsys(&frame_nat_net_ops);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = register_pernet_subsys(&frame_nat_net_ops);
|
||||
if (ret) {
|
||||
ebt_unregister_template(&frame_nat);
|
||||
return ret;
|
||||
}
|
||||
ret = ebt_register_template(&frame_nat, frame_nat_table_init);
|
||||
if (ret)
|
||||
unregister_pernet_subsys(&frame_nat_net_ops);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit ebtable_nat_fini(void)
|
||||
{
|
||||
unregister_pernet_subsys(&frame_nat_net_ops);
|
||||
ebt_unregister_template(&frame_nat);
|
||||
unregister_pernet_subsys(&frame_nat_net_ops);
|
||||
}
|
||||
|
||||
module_init(ebtable_nat_init);
|
||||
|
||||
@@ -42,6 +42,7 @@
|
||||
|
||||
struct ebt_pernet {
|
||||
struct list_head tables;
|
||||
struct list_head dead_tables;
|
||||
};
|
||||
|
||||
struct ebt_template {
|
||||
@@ -1162,11 +1163,6 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
|
||||
|
||||
static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
|
||||
{
|
||||
mutex_lock(&ebt_mutex);
|
||||
list_del(&table->list);
|
||||
mutex_unlock(&ebt_mutex);
|
||||
audit_log_nfcfg(table->name, AF_BRIDGE, table->private->nentries,
|
||||
AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
|
||||
EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
|
||||
ebt_cleanup_entry, net, NULL);
|
||||
if (table->private->nentries)
|
||||
@@ -1267,13 +1263,15 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
|
||||
for (i = 0; i < num_ops; i++)
|
||||
ops[i].priv = table;
|
||||
|
||||
list_add(&table->list, &ebt_net->tables);
|
||||
mutex_unlock(&ebt_mutex);
|
||||
|
||||
table->ops = ops;
|
||||
ret = nf_register_net_hooks(net, ops, num_ops);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
synchronize_rcu();
|
||||
__ebt_unregister_table(net, table);
|
||||
} else {
|
||||
list_add(&table->list, &ebt_net->tables);
|
||||
}
|
||||
mutex_unlock(&ebt_mutex);
|
||||
|
||||
audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries,
|
||||
AUDIT_XT_OP_REGISTER, GFP_KERNEL);
|
||||
@@ -1339,7 +1337,7 @@ void ebt_unregister_template(const struct ebt_table *t)
|
||||
}
|
||||
EXPORT_SYMBOL(ebt_unregister_template);
|
||||
|
||||
static struct ebt_table *__ebt_find_table(struct net *net, const char *name)
|
||||
void ebt_unregister_table_pre_exit(struct net *net, const char *name)
|
||||
{
|
||||
struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);
|
||||
struct ebt_table *t;
|
||||
@@ -1348,30 +1346,36 @@ static struct ebt_table *__ebt_find_table(struct net *net, const char *name)
|
||||
|
||||
list_for_each_entry(t, &ebt_net->tables, list) {
|
||||
if (strcmp(t->name, name) == 0) {
|
||||
list_move(&t->list, &ebt_net->dead_tables);
|
||||
mutex_unlock(&ebt_mutex);
|
||||
return t;
|
||||
nf_unregister_net_hooks(net, t->ops, hweight32(t->valid_hooks));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&ebt_mutex);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void ebt_unregister_table_pre_exit(struct net *net, const char *name)
|
||||
{
|
||||
struct ebt_table *table = __ebt_find_table(net, name);
|
||||
|
||||
if (table)
|
||||
nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
|
||||
}
|
||||
EXPORT_SYMBOL(ebt_unregister_table_pre_exit);
|
||||
|
||||
void ebt_unregister_table(struct net *net, const char *name)
|
||||
{
|
||||
struct ebt_table *table = __ebt_find_table(net, name);
|
||||
struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);
|
||||
struct ebt_table *t;
|
||||
|
||||
if (table)
|
||||
__ebt_unregister_table(net, table);
|
||||
mutex_lock(&ebt_mutex);
|
||||
|
||||
list_for_each_entry(t, &ebt_net->dead_tables, list) {
|
||||
if (strcmp(t->name, name) == 0) {
|
||||
list_del(&t->list);
|
||||
audit_log_nfcfg(t->name, AF_BRIDGE, t->private->nentries,
|
||||
AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
|
||||
__ebt_unregister_table(net, t);
|
||||
mutex_unlock(&ebt_mutex);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&ebt_mutex);
|
||||
}
|
||||
|
||||
/* userspace just supplied us with counters */
|
||||
@@ -2556,11 +2560,21 @@ static int __net_init ebt_pernet_init(struct net *net)
|
||||
struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);
|
||||
|
||||
INIT_LIST_HEAD(&ebt_net->tables);
|
||||
INIT_LIST_HEAD(&ebt_net->dead_tables);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __net_exit ebt_pernet_exit(struct net *net)
|
||||
{
|
||||
struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);
|
||||
|
||||
WARN_ON_ONCE(!list_empty(&ebt_net->tables));
|
||||
WARN_ON_ONCE(!list_empty(&ebt_net->dead_tables));
|
||||
}
|
||||
|
||||
static struct pernet_operations ebt_net_ops = {
|
||||
.init = ebt_pernet_init,
|
||||
.exit = ebt_pernet_exit,
|
||||
.id = &ebt_pernet_id,
|
||||
.size = sizeof(struct ebt_pernet),
|
||||
};
|
||||
@@ -2569,19 +2583,20 @@ static int __init ebtables_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = xt_register_target(&ebt_standard_target);
|
||||
ret = register_pernet_subsys(&ebt_net_ops);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ret = nf_register_sockopt(&ebt_sockopts);
|
||||
|
||||
ret = xt_register_target(&ebt_standard_target);
|
||||
if (ret < 0) {
|
||||
xt_unregister_target(&ebt_standard_target);
|
||||
unregister_pernet_subsys(&ebt_net_ops);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = register_pernet_subsys(&ebt_net_ops);
|
||||
ret = nf_register_sockopt(&ebt_sockopts);
|
||||
if (ret < 0) {
|
||||
nf_unregister_sockopt(&ebt_sockopts);
|
||||
xt_unregister_target(&ebt_standard_target);
|
||||
unregister_pernet_subsys(&ebt_net_ops);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -1501,13 +1501,11 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
|
||||
|
||||
static void __arpt_unregister_table(struct net *net, struct xt_table *table)
|
||||
{
|
||||
struct xt_table_info *private;
|
||||
void *loc_cpu_entry;
|
||||
struct xt_table_info *private = table->private;
|
||||
struct module *table_owner = table->me;
|
||||
void *loc_cpu_entry;
|
||||
struct arpt_entry *iter;
|
||||
|
||||
private = xt_unregister_table(table);
|
||||
|
||||
/* Decrease module usage counts and free resources */
|
||||
loc_cpu_entry = private->entries;
|
||||
xt_entry_foreach(iter, loc_cpu_entry, private->size)
|
||||
@@ -1515,6 +1513,7 @@ static void __arpt_unregister_table(struct net *net, struct xt_table *table)
|
||||
if (private->number > private->initial_entries)
|
||||
module_put(table_owner);
|
||||
xt_free_table_info(private);
|
||||
kfree(table);
|
||||
}
|
||||
|
||||
int arpt_register_table(struct net *net,
|
||||
@@ -1522,13 +1521,11 @@ int arpt_register_table(struct net *net,
|
||||
const struct arpt_replace *repl,
|
||||
const struct nf_hook_ops *template_ops)
|
||||
{
|
||||
struct nf_hook_ops *ops;
|
||||
unsigned int num_ops;
|
||||
int ret, i;
|
||||
struct xt_table_info *newinfo;
|
||||
struct xt_table_info bootstrap = {0};
|
||||
void *loc_cpu_entry;
|
||||
struct xt_table_info *newinfo;
|
||||
struct xt_table *new_table;
|
||||
void *loc_cpu_entry;
|
||||
int ret;
|
||||
|
||||
newinfo = xt_alloc_table_info(repl->size);
|
||||
if (!newinfo)
|
||||
@@ -1543,7 +1540,7 @@ int arpt_register_table(struct net *net,
|
||||
return ret;
|
||||
}
|
||||
|
||||
new_table = xt_register_table(net, table, &bootstrap, newinfo);
|
||||
new_table = xt_register_table(net, table, template_ops, &bootstrap, newinfo);
|
||||
if (IS_ERR(new_table)) {
|
||||
struct arpt_entry *iter;
|
||||
|
||||
@@ -1553,46 +1550,12 @@ int arpt_register_table(struct net *net,
|
||||
return PTR_ERR(new_table);
|
||||
}
|
||||
|
||||
num_ops = hweight32(table->valid_hooks);
|
||||
if (num_ops == 0) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
|
||||
if (!ops) {
|
||||
ret = -ENOMEM;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
for (i = 0; i < num_ops; i++)
|
||||
ops[i].priv = new_table;
|
||||
|
||||
new_table->ops = ops;
|
||||
|
||||
ret = nf_register_net_hooks(net, ops, num_ops);
|
||||
if (ret != 0)
|
||||
goto out_free;
|
||||
|
||||
return ret;
|
||||
|
||||
out_free:
|
||||
__arpt_unregister_table(net, new_table);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void arpt_unregister_table_pre_exit(struct net *net, const char *name)
|
||||
{
|
||||
struct xt_table *table = xt_find_table(net, NFPROTO_ARP, name);
|
||||
|
||||
if (table)
|
||||
nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
|
||||
}
|
||||
EXPORT_SYMBOL(arpt_unregister_table_pre_exit);
|
||||
|
||||
void arpt_unregister_table(struct net *net, const char *name)
|
||||
{
|
||||
struct xt_table *table = xt_find_table(net, NFPROTO_ARP, name);
|
||||
struct xt_table *table = xt_unregister_table_exit(net, NFPROTO_ARP, name);
|
||||
|
||||
if (table)
|
||||
__arpt_unregister_table(net, table);
|
||||
|
||||
@@ -43,7 +43,7 @@ static int arptable_filter_table_init(struct net *net)
|
||||
|
||||
static void __net_exit arptable_filter_net_pre_exit(struct net *net)
|
||||
{
|
||||
arpt_unregister_table_pre_exit(net, "filter");
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_ARP, "filter");
|
||||
}
|
||||
|
||||
static void __net_exit arptable_filter_net_exit(struct net *net)
|
||||
@@ -58,32 +58,33 @@ static struct pernet_operations arptable_filter_net_ops = {
|
||||
|
||||
static int __init arptable_filter_init(void)
|
||||
{
|
||||
int ret = xt_register_template(&packet_filter,
|
||||
arptable_filter_table_init);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
int ret;
|
||||
|
||||
arpfilter_ops = xt_hook_ops_alloc(&packet_filter, arpt_do_table);
|
||||
if (IS_ERR(arpfilter_ops)) {
|
||||
xt_unregister_template(&packet_filter);
|
||||
if (IS_ERR(arpfilter_ops))
|
||||
return PTR_ERR(arpfilter_ops);
|
||||
}
|
||||
|
||||
ret = register_pernet_subsys(&arptable_filter_net_ops);
|
||||
if (ret < 0)
|
||||
goto err_free;
|
||||
|
||||
ret = xt_register_template(&packet_filter,
|
||||
arptable_filter_table_init);
|
||||
if (ret < 0) {
|
||||
xt_unregister_template(&packet_filter);
|
||||
kfree(arpfilter_ops);
|
||||
return ret;
|
||||
unregister_pernet_subsys(&arptable_filter_net_ops);
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err_free:
|
||||
kfree(arpfilter_ops);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit arptable_filter_fini(void)
|
||||
{
|
||||
unregister_pernet_subsys(&arptable_filter_net_ops);
|
||||
xt_unregister_template(&packet_filter);
|
||||
unregister_pernet_subsys(&arptable_filter_net_ops);
|
||||
kfree(arpfilter_ops);
|
||||
}
|
||||
|
||||
|
||||
@@ -1704,12 +1704,10 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
|
||||
|
||||
static void __ipt_unregister_table(struct net *net, struct xt_table *table)
|
||||
{
|
||||
struct xt_table_info *private;
|
||||
void *loc_cpu_entry;
|
||||
struct xt_table_info *private = table->private;
|
||||
struct module *table_owner = table->me;
|
||||
struct ipt_entry *iter;
|
||||
|
||||
private = xt_unregister_table(table);
|
||||
void *loc_cpu_entry;
|
||||
|
||||
/* Decrease module usage counts and free resources */
|
||||
loc_cpu_entry = private->entries;
|
||||
@@ -1718,19 +1716,18 @@ static void __ipt_unregister_table(struct net *net, struct xt_table *table)
|
||||
if (private->number > private->initial_entries)
|
||||
module_put(table_owner);
|
||||
xt_free_table_info(private);
|
||||
kfree(table);
|
||||
}
|
||||
|
||||
int ipt_register_table(struct net *net, const struct xt_table *table,
|
||||
const struct ipt_replace *repl,
|
||||
const struct nf_hook_ops *template_ops)
|
||||
{
|
||||
struct nf_hook_ops *ops;
|
||||
unsigned int num_ops;
|
||||
int ret, i;
|
||||
struct xt_table_info *newinfo;
|
||||
struct xt_table_info bootstrap = {0};
|
||||
void *loc_cpu_entry;
|
||||
struct xt_table_info *newinfo;
|
||||
struct xt_table *new_table;
|
||||
void *loc_cpu_entry;
|
||||
int ret;
|
||||
|
||||
newinfo = xt_alloc_table_info(repl->size);
|
||||
if (!newinfo)
|
||||
@@ -1745,7 +1742,7 @@ int ipt_register_table(struct net *net, const struct xt_table *table,
|
||||
return ret;
|
||||
}
|
||||
|
||||
new_table = xt_register_table(net, table, &bootstrap, newinfo);
|
||||
new_table = xt_register_table(net, table, template_ops, &bootstrap, newinfo);
|
||||
if (IS_ERR(new_table)) {
|
||||
struct ipt_entry *iter;
|
||||
|
||||
@@ -1755,51 +1752,12 @@ int ipt_register_table(struct net *net, const struct xt_table *table,
|
||||
return PTR_ERR(new_table);
|
||||
}
|
||||
|
||||
/* No template? No need to do anything. This is used by 'nat' table, it registers
|
||||
* with the nat core instead of the netfilter core.
|
||||
*/
|
||||
if (!template_ops)
|
||||
return 0;
|
||||
|
||||
num_ops = hweight32(table->valid_hooks);
|
||||
if (num_ops == 0) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
|
||||
if (!ops) {
|
||||
ret = -ENOMEM;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
for (i = 0; i < num_ops; i++)
|
||||
ops[i].priv = new_table;
|
||||
|
||||
new_table->ops = ops;
|
||||
|
||||
ret = nf_register_net_hooks(net, ops, num_ops);
|
||||
if (ret != 0)
|
||||
goto out_free;
|
||||
|
||||
return ret;
|
||||
|
||||
out_free:
|
||||
__ipt_unregister_table(net, new_table);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void ipt_unregister_table_pre_exit(struct net *net, const char *name)
|
||||
{
|
||||
struct xt_table *table = xt_find_table(net, NFPROTO_IPV4, name);
|
||||
|
||||
if (table)
|
||||
nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
|
||||
}
|
||||
|
||||
void ipt_unregister_table_exit(struct net *net, const char *name)
|
||||
{
|
||||
struct xt_table *table = xt_find_table(net, NFPROTO_IPV4, name);
|
||||
struct xt_table *table = xt_unregister_table_exit(net, NFPROTO_IPV4, name);
|
||||
|
||||
if (table)
|
||||
__ipt_unregister_table(net, table);
|
||||
@@ -1887,7 +1845,6 @@ static void __exit ip_tables_fini(void)
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(ipt_register_table);
|
||||
EXPORT_SYMBOL(ipt_unregister_table_pre_exit);
|
||||
EXPORT_SYMBOL(ipt_unregister_table_exit);
|
||||
EXPORT_SYMBOL(ipt_do_table);
|
||||
module_init(ip_tables_init);
|
||||
|
||||
@@ -61,7 +61,7 @@ static int __net_init iptable_filter_net_init(struct net *net)
|
||||
|
||||
static void __net_exit iptable_filter_net_pre_exit(struct net *net)
|
||||
{
|
||||
ipt_unregister_table_pre_exit(net, "filter");
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV4, "filter");
|
||||
}
|
||||
|
||||
static void __net_exit iptable_filter_net_exit(struct net *net)
|
||||
@@ -77,32 +77,33 @@ static struct pernet_operations iptable_filter_net_ops = {
|
||||
|
||||
static int __init iptable_filter_init(void)
|
||||
{
|
||||
int ret = xt_register_template(&packet_filter,
|
||||
iptable_filter_table_init);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
int ret;
|
||||
|
||||
filter_ops = xt_hook_ops_alloc(&packet_filter, ipt_do_table);
|
||||
if (IS_ERR(filter_ops)) {
|
||||
xt_unregister_template(&packet_filter);
|
||||
if (IS_ERR(filter_ops))
|
||||
return PTR_ERR(filter_ops);
|
||||
}
|
||||
|
||||
ret = register_pernet_subsys(&iptable_filter_net_ops);
|
||||
if (ret < 0)
|
||||
goto err_free;
|
||||
|
||||
ret = xt_register_template(&packet_filter,
|
||||
iptable_filter_table_init);
|
||||
if (ret < 0) {
|
||||
xt_unregister_template(&packet_filter);
|
||||
kfree(filter_ops);
|
||||
return ret;
|
||||
unregister_pernet_subsys(&iptable_filter_net_ops);
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err_free:
|
||||
kfree(filter_ops);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit iptable_filter_fini(void)
|
||||
{
|
||||
unregister_pernet_subsys(&iptable_filter_net_ops);
|
||||
xt_unregister_template(&packet_filter);
|
||||
unregister_pernet_subsys(&iptable_filter_net_ops);
|
||||
kfree(filter_ops);
|
||||
}
|
||||
|
||||
|
||||
@@ -96,7 +96,7 @@ static int iptable_mangle_table_init(struct net *net)
|
||||
|
||||
static void __net_exit iptable_mangle_net_pre_exit(struct net *net)
|
||||
{
|
||||
ipt_unregister_table_pre_exit(net, "mangle");
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV4, "mangle");
|
||||
}
|
||||
|
||||
static void __net_exit iptable_mangle_net_exit(struct net *net)
|
||||
@@ -111,32 +111,33 @@ static struct pernet_operations iptable_mangle_net_ops = {
|
||||
|
||||
static int __init iptable_mangle_init(void)
|
||||
{
|
||||
int ret = xt_register_template(&packet_mangler,
|
||||
iptable_mangle_table_init);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
int ret;
|
||||
|
||||
mangle_ops = xt_hook_ops_alloc(&packet_mangler, iptable_mangle_hook);
|
||||
if (IS_ERR(mangle_ops)) {
|
||||
xt_unregister_template(&packet_mangler);
|
||||
ret = PTR_ERR(mangle_ops);
|
||||
return ret;
|
||||
}
|
||||
if (IS_ERR(mangle_ops))
|
||||
return PTR_ERR(mangle_ops);
|
||||
|
||||
ret = register_pernet_subsys(&iptable_mangle_net_ops);
|
||||
if (ret < 0)
|
||||
goto err_free;
|
||||
|
||||
ret = xt_register_template(&packet_mangler,
|
||||
iptable_mangle_table_init);
|
||||
if (ret < 0) {
|
||||
xt_unregister_template(&packet_mangler);
|
||||
kfree(mangle_ops);
|
||||
return ret;
|
||||
unregister_pernet_subsys(&iptable_mangle_net_ops);
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err_free:
|
||||
kfree(mangle_ops);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit iptable_mangle_fini(void)
|
||||
{
|
||||
unregister_pernet_subsys(&iptable_mangle_net_ops);
|
||||
xt_unregister_template(&packet_mangler);
|
||||
unregister_pernet_subsys(&iptable_mangle_net_ops);
|
||||
kfree(mangle_ops);
|
||||
}
|
||||
|
||||
|
||||
@@ -119,8 +119,11 @@ static int iptable_nat_table_init(struct net *net)
|
||||
}
|
||||
|
||||
ret = ipt_nat_register_lookups(net);
|
||||
if (ret < 0)
|
||||
if (ret < 0) {
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV4, "nat");
|
||||
synchronize_rcu();
|
||||
ipt_unregister_table_exit(net, "nat");
|
||||
}
|
||||
|
||||
kfree(repl);
|
||||
return ret;
|
||||
@@ -129,6 +132,7 @@ static int iptable_nat_table_init(struct net *net)
|
||||
static void __net_exit iptable_nat_net_pre_exit(struct net *net)
|
||||
{
|
||||
ipt_nat_unregister_lookups(net);
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV4, "nat");
|
||||
}
|
||||
|
||||
static void __net_exit iptable_nat_net_exit(struct net *net)
|
||||
|
||||
@@ -53,7 +53,7 @@ static int iptable_raw_table_init(struct net *net)
|
||||
|
||||
static void __net_exit iptable_raw_net_pre_exit(struct net *net)
|
||||
{
|
||||
ipt_unregister_table_pre_exit(net, "raw");
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV4, "raw");
|
||||
}
|
||||
|
||||
static void __net_exit iptable_raw_net_exit(struct net *net)
|
||||
@@ -77,32 +77,32 @@ static int __init iptable_raw_init(void)
|
||||
pr_info("Enabling raw table before defrag\n");
|
||||
}
|
||||
|
||||
ret = xt_register_template(table,
|
||||
iptable_raw_table_init);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
rawtable_ops = xt_hook_ops_alloc(table, ipt_do_table);
|
||||
if (IS_ERR(rawtable_ops)) {
|
||||
xt_unregister_template(table);
|
||||
if (IS_ERR(rawtable_ops))
|
||||
return PTR_ERR(rawtable_ops);
|
||||
}
|
||||
|
||||
ret = register_pernet_subsys(&iptable_raw_net_ops);
|
||||
if (ret < 0)
|
||||
goto err_free;
|
||||
|
||||
ret = xt_register_template(table,
|
||||
iptable_raw_table_init);
|
||||
if (ret < 0) {
|
||||
xt_unregister_template(table);
|
||||
kfree(rawtable_ops);
|
||||
return ret;
|
||||
unregister_pernet_subsys(&iptable_raw_net_ops);
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err_free:
|
||||
kfree(rawtable_ops);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit iptable_raw_fini(void)
|
||||
{
|
||||
xt_unregister_template(&packet_raw);
|
||||
unregister_pernet_subsys(&iptable_raw_net_ops);
|
||||
kfree(rawtable_ops);
|
||||
xt_unregister_template(&packet_raw);
|
||||
}
|
||||
|
||||
module_init(iptable_raw_init);
|
||||
|
||||
@@ -50,7 +50,7 @@ static int iptable_security_table_init(struct net *net)
|
||||
|
||||
static void __net_exit iptable_security_net_pre_exit(struct net *net)
|
||||
{
|
||||
ipt_unregister_table_pre_exit(net, "security");
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV4, "security");
|
||||
}
|
||||
|
||||
static void __net_exit iptable_security_net_exit(struct net *net)
|
||||
@@ -65,33 +65,34 @@ static struct pernet_operations iptable_security_net_ops = {
|
||||
|
||||
static int __init iptable_security_init(void)
|
||||
{
|
||||
int ret = xt_register_template(&security_table,
|
||||
iptable_security_table_init);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
int ret;
|
||||
|
||||
sectbl_ops = xt_hook_ops_alloc(&security_table, ipt_do_table);
|
||||
if (IS_ERR(sectbl_ops)) {
|
||||
xt_unregister_template(&security_table);
|
||||
if (IS_ERR(sectbl_ops))
|
||||
return PTR_ERR(sectbl_ops);
|
||||
}
|
||||
|
||||
ret = register_pernet_subsys(&iptable_security_net_ops);
|
||||
if (ret < 0)
|
||||
goto err_free;
|
||||
|
||||
ret = xt_register_template(&security_table,
|
||||
iptable_security_table_init);
|
||||
if (ret < 0) {
|
||||
xt_unregister_template(&security_table);
|
||||
kfree(sectbl_ops);
|
||||
return ret;
|
||||
unregister_pernet_subsys(&iptable_security_net_ops);
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err_free:
|
||||
kfree(sectbl_ops);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit iptable_security_fini(void)
|
||||
{
|
||||
xt_unregister_template(&security_table);
|
||||
unregister_pernet_subsys(&iptable_security_net_ops);
|
||||
kfree(sectbl_ops);
|
||||
xt_unregister_template(&security_table);
|
||||
}
|
||||
|
||||
module_init(iptable_security_init);
|
||||
|
||||
@@ -1713,12 +1713,10 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
|
||||
|
||||
static void __ip6t_unregister_table(struct net *net, struct xt_table *table)
|
||||
{
|
||||
struct xt_table_info *private;
|
||||
void *loc_cpu_entry;
|
||||
struct xt_table_info *private = table->private;
|
||||
struct module *table_owner = table->me;
|
||||
struct ip6t_entry *iter;
|
||||
|
||||
private = xt_unregister_table(table);
|
||||
void *loc_cpu_entry;
|
||||
|
||||
/* Decrease module usage counts and free resources */
|
||||
loc_cpu_entry = private->entries;
|
||||
@@ -1727,19 +1725,18 @@ static void __ip6t_unregister_table(struct net *net, struct xt_table *table)
|
||||
if (private->number > private->initial_entries)
|
||||
module_put(table_owner);
|
||||
xt_free_table_info(private);
|
||||
kfree(table);
|
||||
}
|
||||
|
||||
int ip6t_register_table(struct net *net, const struct xt_table *table,
|
||||
const struct ip6t_replace *repl,
|
||||
const struct nf_hook_ops *template_ops)
|
||||
{
|
||||
struct nf_hook_ops *ops;
|
||||
unsigned int num_ops;
|
||||
int ret, i;
|
||||
struct xt_table_info *newinfo;
|
||||
struct xt_table_info bootstrap = {0};
|
||||
void *loc_cpu_entry;
|
||||
struct xt_table_info *newinfo;
|
||||
struct xt_table *new_table;
|
||||
void *loc_cpu_entry;
|
||||
int ret;
|
||||
|
||||
newinfo = xt_alloc_table_info(repl->size);
|
||||
if (!newinfo)
|
||||
@@ -1754,7 +1751,7 @@ int ip6t_register_table(struct net *net, const struct xt_table *table,
|
||||
return ret;
|
||||
}
|
||||
|
||||
new_table = xt_register_table(net, table, &bootstrap, newinfo);
|
||||
new_table = xt_register_table(net, table, template_ops, &bootstrap, newinfo);
|
||||
if (IS_ERR(new_table)) {
|
||||
struct ip6t_entry *iter;
|
||||
|
||||
@@ -1764,48 +1761,12 @@ int ip6t_register_table(struct net *net, const struct xt_table *table,
|
||||
return PTR_ERR(new_table);
|
||||
}
|
||||
|
||||
if (!template_ops)
|
||||
return 0;
|
||||
|
||||
num_ops = hweight32(table->valid_hooks);
|
||||
if (num_ops == 0) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
|
||||
if (!ops) {
|
||||
ret = -ENOMEM;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
for (i = 0; i < num_ops; i++)
|
||||
ops[i].priv = new_table;
|
||||
|
||||
new_table->ops = ops;
|
||||
|
||||
ret = nf_register_net_hooks(net, ops, num_ops);
|
||||
if (ret != 0)
|
||||
goto out_free;
|
||||
|
||||
return ret;
|
||||
|
||||
out_free:
|
||||
__ip6t_unregister_table(net, new_table);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void ip6t_unregister_table_pre_exit(struct net *net, const char *name)
|
||||
{
|
||||
struct xt_table *table = xt_find_table(net, NFPROTO_IPV6, name);
|
||||
|
||||
if (table)
|
||||
nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks));
|
||||
}
|
||||
|
||||
void ip6t_unregister_table_exit(struct net *net, const char *name)
|
||||
{
|
||||
struct xt_table *table = xt_find_table(net, NFPROTO_IPV6, name);
|
||||
struct xt_table *table = xt_unregister_table_exit(net, NFPROTO_IPV6, name);
|
||||
|
||||
if (table)
|
||||
__ip6t_unregister_table(net, table);
|
||||
@@ -1894,7 +1855,6 @@ static void __exit ip6_tables_fini(void)
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(ip6t_register_table);
|
||||
EXPORT_SYMBOL(ip6t_unregister_table_pre_exit);
|
||||
EXPORT_SYMBOL(ip6t_unregister_table_exit);
|
||||
EXPORT_SYMBOL(ip6t_do_table);
|
||||
|
||||
|
||||
@@ -60,7 +60,7 @@ static int __net_init ip6table_filter_net_init(struct net *net)
|
||||
|
||||
static void __net_exit ip6table_filter_net_pre_exit(struct net *net)
|
||||
{
|
||||
ip6t_unregister_table_pre_exit(net, "filter");
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV6, "filter");
|
||||
}
|
||||
|
||||
static void __net_exit ip6table_filter_net_exit(struct net *net)
|
||||
@@ -76,32 +76,32 @@ static struct pernet_operations ip6table_filter_net_ops = {
|
||||
|
||||
static int __init ip6table_filter_init(void)
|
||||
{
|
||||
int ret = xt_register_template(&packet_filter,
|
||||
ip6table_filter_table_init);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
int ret;
|
||||
|
||||
filter_ops = xt_hook_ops_alloc(&packet_filter, ip6t_do_table);
|
||||
if (IS_ERR(filter_ops)) {
|
||||
xt_unregister_template(&packet_filter);
|
||||
if (IS_ERR(filter_ops))
|
||||
return PTR_ERR(filter_ops);
|
||||
}
|
||||
|
||||
ret = register_pernet_subsys(&ip6table_filter_net_ops);
|
||||
if (ret < 0)
|
||||
goto err_free;
|
||||
|
||||
ret = xt_register_template(&packet_filter, ip6table_filter_table_init);
|
||||
if (ret < 0) {
|
||||
xt_unregister_template(&packet_filter);
|
||||
kfree(filter_ops);
|
||||
return ret;
|
||||
unregister_pernet_subsys(&ip6table_filter_net_ops);
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err_free:
|
||||
kfree(filter_ops);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit ip6table_filter_fini(void)
|
||||
{
|
||||
unregister_pernet_subsys(&ip6table_filter_net_ops);
|
||||
xt_unregister_template(&packet_filter);
|
||||
unregister_pernet_subsys(&ip6table_filter_net_ops);
|
||||
kfree(filter_ops);
|
||||
}
|
||||
|
||||
|
||||
@@ -89,7 +89,7 @@ static int ip6table_mangle_table_init(struct net *net)
|
||||
|
||||
static void __net_exit ip6table_mangle_net_pre_exit(struct net *net)
|
||||
{
|
||||
ip6t_unregister_table_pre_exit(net, "mangle");
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV6, "mangle");
|
||||
}
|
||||
|
||||
static void __net_exit ip6table_mangle_net_exit(struct net *net)
|
||||
@@ -104,32 +104,33 @@ static struct pernet_operations ip6table_mangle_net_ops = {
|
||||
|
||||
static int __init ip6table_mangle_init(void)
|
||||
{
|
||||
int ret = xt_register_template(&packet_mangler,
|
||||
ip6table_mangle_table_init);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
int ret;
|
||||
|
||||
mangle_ops = xt_hook_ops_alloc(&packet_mangler, ip6table_mangle_hook);
|
||||
if (IS_ERR(mangle_ops)) {
|
||||
xt_unregister_template(&packet_mangler);
|
||||
if (IS_ERR(mangle_ops))
|
||||
return PTR_ERR(mangle_ops);
|
||||
}
|
||||
|
||||
ret = register_pernet_subsys(&ip6table_mangle_net_ops);
|
||||
if (ret < 0)
|
||||
goto err_free;
|
||||
|
||||
ret = xt_register_template(&packet_mangler,
|
||||
ip6table_mangle_table_init);
|
||||
if (ret < 0) {
|
||||
xt_unregister_template(&packet_mangler);
|
||||
kfree(mangle_ops);
|
||||
return ret;
|
||||
unregister_pernet_subsys(&ip6table_mangle_net_ops);
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err_free:
|
||||
kfree(mangle_ops);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit ip6table_mangle_fini(void)
|
||||
{
|
||||
unregister_pernet_subsys(&ip6table_mangle_net_ops);
|
||||
xt_unregister_template(&packet_mangler);
|
||||
unregister_pernet_subsys(&ip6table_mangle_net_ops);
|
||||
kfree(mangle_ops);
|
||||
}
|
||||
|
||||
|
||||
@@ -121,8 +121,11 @@ static int ip6table_nat_table_init(struct net *net)
|
||||
}
|
||||
|
||||
ret = ip6t_nat_register_lookups(net);
|
||||
if (ret < 0)
|
||||
if (ret < 0) {
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV6, "nat");
|
||||
synchronize_rcu();
|
||||
ip6t_unregister_table_exit(net, "nat");
|
||||
}
|
||||
|
||||
kfree(repl);
|
||||
return ret;
|
||||
@@ -131,6 +134,7 @@ static int ip6table_nat_table_init(struct net *net)
|
||||
static void __net_exit ip6table_nat_net_pre_exit(struct net *net)
|
||||
{
|
||||
ip6t_nat_unregister_lookups(net);
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV6, "nat");
|
||||
}
|
||||
|
||||
static void __net_exit ip6table_nat_net_exit(struct net *net)
|
||||
|
||||
@@ -52,7 +52,7 @@ static int ip6table_raw_table_init(struct net *net)
|
||||
|
||||
static void __net_exit ip6table_raw_net_pre_exit(struct net *net)
|
||||
{
|
||||
ip6t_unregister_table_pre_exit(net, "raw");
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV6, "raw");
|
||||
}
|
||||
|
||||
static void __net_exit ip6table_raw_net_exit(struct net *net)
|
||||
@@ -75,31 +75,31 @@ static int __init ip6table_raw_init(void)
|
||||
pr_info("Enabling raw table before defrag\n");
|
||||
}
|
||||
|
||||
ret = xt_register_template(table, ip6table_raw_table_init);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* Register hooks */
|
||||
rawtable_ops = xt_hook_ops_alloc(table, ip6t_do_table);
|
||||
if (IS_ERR(rawtable_ops)) {
|
||||
xt_unregister_template(table);
|
||||
if (IS_ERR(rawtable_ops))
|
||||
return PTR_ERR(rawtable_ops);
|
||||
}
|
||||
|
||||
ret = register_pernet_subsys(&ip6table_raw_net_ops);
|
||||
if (ret < 0)
|
||||
goto err_free;
|
||||
|
||||
ret = xt_register_template(table, ip6table_raw_table_init);
|
||||
if (ret < 0) {
|
||||
kfree(rawtable_ops);
|
||||
xt_unregister_template(table);
|
||||
return ret;
|
||||
unregister_pernet_subsys(&ip6table_raw_net_ops);
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err_free:
|
||||
kfree(rawtable_ops);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit ip6table_raw_fini(void)
|
||||
{
|
||||
unregister_pernet_subsys(&ip6table_raw_net_ops);
|
||||
xt_unregister_template(&packet_raw);
|
||||
unregister_pernet_subsys(&ip6table_raw_net_ops);
|
||||
kfree(rawtable_ops);
|
||||
}
|
||||
|
||||
|
||||
@@ -49,7 +49,7 @@ static int ip6table_security_table_init(struct net *net)
|
||||
|
||||
static void __net_exit ip6table_security_net_pre_exit(struct net *net)
|
||||
{
|
||||
ip6t_unregister_table_pre_exit(net, "security");
|
||||
xt_unregister_table_pre_exit(net, NFPROTO_IPV6, "security");
|
||||
}
|
||||
|
||||
static void __net_exit ip6table_security_net_exit(struct net *net)
|
||||
@@ -64,32 +64,33 @@ static struct pernet_operations ip6table_security_net_ops = {
|
||||
|
||||
static int __init ip6table_security_init(void)
|
||||
{
|
||||
int ret = xt_register_template(&security_table,
|
||||
ip6table_security_table_init);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
int ret;
|
||||
|
||||
sectbl_ops = xt_hook_ops_alloc(&security_table, ip6t_do_table);
|
||||
if (IS_ERR(sectbl_ops)) {
|
||||
xt_unregister_template(&security_table);
|
||||
if (IS_ERR(sectbl_ops))
|
||||
return PTR_ERR(sectbl_ops);
|
||||
}
|
||||
|
||||
ret = register_pernet_subsys(&ip6table_security_net_ops);
|
||||
if (ret < 0)
|
||||
goto err_free;
|
||||
|
||||
ret = xt_register_template(&security_table,
|
||||
ip6table_security_table_init);
|
||||
if (ret < 0) {
|
||||
kfree(sectbl_ops);
|
||||
xt_unregister_template(&security_table);
|
||||
return ret;
|
||||
unregister_pernet_subsys(&ip6table_security_net_ops);
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err_free:
|
||||
kfree(sectbl_ops);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit ip6table_security_fini(void)
|
||||
{
|
||||
unregister_pernet_subsys(&ip6table_security_net_ops);
|
||||
xt_unregister_template(&security_table);
|
||||
unregister_pernet_subsys(&ip6table_security_net_ops);
|
||||
kfree(sectbl_ops);
|
||||
}
|
||||
|
||||
|
||||
@@ -72,6 +72,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb,
|
||||
exp->flags = NF_CT_EXPECT_PERMANENT;
|
||||
exp->class = NF_CT_EXPECT_CLASS_DEFAULT;
|
||||
rcu_assign_pointer(exp->helper, helper);
|
||||
rcu_assign_pointer(exp->assign_helper, NULL);
|
||||
write_pnet(&exp->net, net);
|
||||
#ifdef CONFIG_NF_CONNTRACK_ZONES
|
||||
exp->zone = ct->zone;
|
||||
|
||||
@@ -1811,14 +1811,17 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
|
||||
spin_lock_bh(&nf_conntrack_expect_lock);
|
||||
exp = nf_ct_find_expectation(net, zone, tuple, !tmpl || nf_ct_is_confirmed(tmpl));
|
||||
if (exp) {
|
||||
struct nf_conntrack_helper *assign_helper;
|
||||
|
||||
/* Welcome, Mr. Bond. We've been expecting you... */
|
||||
__set_bit(IPS_EXPECTED_BIT, &ct->status);
|
||||
/* exp->master safe, refcnt bumped in nf_ct_find_expectation */
|
||||
ct->master = exp->master;
|
||||
if (exp->helper) {
|
||||
assign_helper = rcu_dereference(exp->assign_helper);
|
||||
if (assign_helper) {
|
||||
help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
|
||||
if (help)
|
||||
rcu_assign_pointer(help->helper, exp->helper);
|
||||
rcu_assign_pointer(help->helper, assign_helper);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NF_CONNTRACK_MARK
|
||||
|
||||
@@ -344,6 +344,7 @@ void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
|
||||
helper = rcu_dereference(help->helper);
|
||||
|
||||
rcu_assign_pointer(exp->helper, helper);
|
||||
rcu_assign_pointer(exp->assign_helper, NULL);
|
||||
write_pnet(&exp->net, net);
|
||||
#ifdef CONFIG_NF_CONNTRACK_ZONES
|
||||
exp->zone = ct->zone;
|
||||
|
||||
@@ -643,7 +643,7 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
|
||||
&ct->tuplehash[!dir].tuple.src.u3,
|
||||
&ct->tuplehash[!dir].tuple.dst.u3,
|
||||
IPPROTO_TCP, NULL, &port);
|
||||
rcu_assign_pointer(exp->helper, &nf_conntrack_helper_h245);
|
||||
rcu_assign_pointer(exp->assign_helper, &nf_conntrack_helper_h245);
|
||||
|
||||
nathook = rcu_dereference(nfct_h323_nat_hook);
|
||||
if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
|
||||
@@ -767,7 +767,7 @@ static int expect_callforwarding(struct sk_buff *skb,
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
|
||||
&ct->tuplehash[!dir].tuple.src.u3, &addr,
|
||||
IPPROTO_TCP, NULL, &port);
|
||||
rcu_assign_pointer(exp->helper, nf_conntrack_helper_q931);
|
||||
rcu_assign_pointer(exp->assign_helper, nf_conntrack_helper_q931);
|
||||
|
||||
nathook = rcu_dereference(nfct_h323_nat_hook);
|
||||
if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
|
||||
@@ -1234,7 +1234,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
|
||||
&ct->tuplehash[!dir].tuple.src.u3 : NULL,
|
||||
&ct->tuplehash[!dir].tuple.dst.u3,
|
||||
IPPROTO_TCP, NULL, &port);
|
||||
rcu_assign_pointer(exp->helper, nf_conntrack_helper_q931);
|
||||
rcu_assign_pointer(exp->assign_helper, nf_conntrack_helper_q931);
|
||||
exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple calls */
|
||||
|
||||
nathook = rcu_dereference(nfct_h323_nat_hook);
|
||||
@@ -1306,7 +1306,7 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
|
||||
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
|
||||
&ct->tuplehash[!dir].tuple.src.u3, &addr,
|
||||
IPPROTO_UDP, NULL, &port);
|
||||
rcu_assign_pointer(exp->helper, nf_conntrack_helper_ras);
|
||||
rcu_assign_pointer(exp->assign_helper, nf_conntrack_helper_ras);
|
||||
|
||||
if (nf_ct_expect_related(exp, 0) == 0) {
|
||||
pr_debug("nf_ct_ras: expect RAS ");
|
||||
@@ -1523,7 +1523,7 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
|
||||
&ct->tuplehash[!dir].tuple.src.u3, &addr,
|
||||
IPPROTO_TCP, NULL, &port);
|
||||
exp->flags = NF_CT_EXPECT_PERMANENT;
|
||||
rcu_assign_pointer(exp->helper, nf_conntrack_helper_q931);
|
||||
rcu_assign_pointer(exp->assign_helper, nf_conntrack_helper_q931);
|
||||
|
||||
if (nf_ct_expect_related(exp, 0) == 0) {
|
||||
pr_debug("nf_ct_ras: expect Q.931 ");
|
||||
@@ -1577,7 +1577,7 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
|
||||
&ct->tuplehash[!dir].tuple.src.u3, &addr,
|
||||
IPPROTO_TCP, NULL, &port);
|
||||
exp->flags = NF_CT_EXPECT_PERMANENT;
|
||||
rcu_assign_pointer(exp->helper, nf_conntrack_helper_q931);
|
||||
rcu_assign_pointer(exp->assign_helper, nf_conntrack_helper_q931);
|
||||
|
||||
if (nf_ct_expect_related(exp, 0) == 0) {
|
||||
pr_debug("nf_ct_ras: expect Q.931 ");
|
||||
|
||||
@@ -400,6 +400,11 @@ static bool expect_iter_me(struct nf_conntrack_expect *exp, void *data)
|
||||
|
||||
this = rcu_dereference_protected(exp->helper,
|
||||
lockdep_is_held(&nf_conntrack_expect_lock));
|
||||
if (this == me)
|
||||
return true;
|
||||
|
||||
this = rcu_dereference_protected(exp->assign_helper,
|
||||
lockdep_is_held(&nf_conntrack_expect_lock));
|
||||
return this == me;
|
||||
}
|
||||
|
||||
|
||||
@@ -2634,6 +2634,7 @@ static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
|
||||
|
||||
static struct nf_conntrack_expect *
|
||||
ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
|
||||
const struct nf_conntrack_helper *assign_helper,
|
||||
struct nf_conntrack_tuple *tuple,
|
||||
struct nf_conntrack_tuple *mask);
|
||||
|
||||
@@ -2860,6 +2861,7 @@ static int
|
||||
ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
|
||||
u32 portid, u32 report)
|
||||
{
|
||||
struct nf_conntrack_helper *assign_helper = NULL;
|
||||
struct nlattr *cda[CTA_EXPECT_MAX+1];
|
||||
struct nf_conntrack_tuple tuple, mask;
|
||||
struct nf_conntrack_expect *exp;
|
||||
@@ -2870,13 +2872,26 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (!cda[CTA_EXPECT_TUPLE] || !cda[CTA_EXPECT_MASK])
|
||||
return -EINVAL;
|
||||
|
||||
err = ctnetlink_glue_exp_parse((const struct nlattr * const *)cda,
|
||||
ct, &tuple, &mask);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (cda[CTA_EXPECT_HELP_NAME]) {
|
||||
const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
|
||||
|
||||
assign_helper = __nf_conntrack_helper_find(helpname,
|
||||
nf_ct_l3num(ct),
|
||||
tuple.dst.protonum);
|
||||
if (!assign_helper)
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
|
||||
&tuple, &mask);
|
||||
assign_helper, &tuple, &mask);
|
||||
if (IS_ERR(exp))
|
||||
return PTR_ERR(exp);
|
||||
|
||||
@@ -3515,6 +3530,7 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr,
|
||||
|
||||
static struct nf_conntrack_expect *
|
||||
ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
|
||||
const struct nf_conntrack_helper *assign_helper,
|
||||
struct nf_conntrack_tuple *tuple,
|
||||
struct nf_conntrack_tuple *mask)
|
||||
{
|
||||
@@ -3568,6 +3584,7 @@ ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
|
||||
exp->zone = ct->zone;
|
||||
#endif
|
||||
rcu_assign_pointer(exp->helper, helper);
|
||||
rcu_assign_pointer(exp->assign_helper, assign_helper);
|
||||
exp->tuple = *tuple;
|
||||
exp->mask.src.u3 = mask->src.u3;
|
||||
exp->mask.src.u.all = mask->src.u.all;
|
||||
@@ -3623,7 +3640,7 @@ ctnetlink_create_expect(struct net *net,
|
||||
ct = nf_ct_tuplehash_to_ctrack(h);
|
||||
|
||||
rcu_read_lock();
|
||||
exp = ctnetlink_alloc_expect(cda, ct, &tuple, &mask);
|
||||
exp = ctnetlink_alloc_expect(cda, ct, NULL, &tuple, &mask);
|
||||
if (IS_ERR(exp)) {
|
||||
err = PTR_ERR(exp);
|
||||
goto err_rcu;
|
||||
|
||||
@@ -1366,6 +1366,10 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
|
||||
goto store_cseq;
|
||||
}
|
||||
|
||||
helper = rcu_dereference(nfct_help(ct)->helper);
|
||||
if (!helper)
|
||||
return NF_DROP;
|
||||
|
||||
exp = nf_ct_expect_alloc(ct);
|
||||
if (!exp) {
|
||||
nf_ct_helper_log(skb, ct, "cannot alloc expectation");
|
||||
@@ -1376,14 +1380,10 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
|
||||
if (sip_direct_signalling)
|
||||
saddr = &ct->tuplehash[!dir].tuple.src.u3;
|
||||
|
||||
helper = rcu_dereference(nfct_help(ct)->helper);
|
||||
if (!helper)
|
||||
return NF_DROP;
|
||||
|
||||
nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct),
|
||||
saddr, &daddr, proto, NULL, &port);
|
||||
exp->timeout.expires = sip_timeout * HZ;
|
||||
rcu_assign_pointer(exp->helper, helper);
|
||||
rcu_assign_pointer(exp->assign_helper, helper);
|
||||
exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;
|
||||
|
||||
hooks = rcu_dereference(nf_nat_sip_hooks);
|
||||
|
||||
@@ -1334,6 +1334,8 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
|
||||
|
||||
if (nf_ct_expect_related(exp, 0) != 0)
|
||||
regs->verdict.code = NF_DROP;
|
||||
|
||||
nf_ct_expect_put(exp);
|
||||
}
|
||||
|
||||
static const struct nla_policy nft_ct_expect_policy[NFTA_CT_EXPECT_MAX + 1] = {
|
||||
|
||||
@@ -55,6 +55,9 @@ static struct list_head xt_templates[NFPROTO_NUMPROTO];
|
||||
|
||||
struct xt_pernet {
|
||||
struct list_head tables[NFPROTO_NUMPROTO];
|
||||
|
||||
/* stash area used during netns exit */
|
||||
struct list_head dead_tables[NFPROTO_NUMPROTO];
|
||||
};
|
||||
|
||||
struct compat_delta {
|
||||
@@ -1472,11 +1475,9 @@ struct xt_counters *xt_counters_alloc(unsigned int counters)
|
||||
}
|
||||
EXPORT_SYMBOL(xt_counters_alloc);
|
||||
|
||||
struct xt_table_info *
|
||||
xt_replace_table(struct xt_table *table,
|
||||
unsigned int num_counters,
|
||||
struct xt_table_info *newinfo,
|
||||
int *error)
|
||||
static struct xt_table_info *
|
||||
do_replace_table(struct xt_table *table, unsigned int num_counters,
|
||||
struct xt_table_info *newinfo, int *error)
|
||||
{
|
||||
struct xt_table_info *private;
|
||||
unsigned int cpu;
|
||||
@@ -1531,30 +1532,54 @@ xt_replace_table(struct xt_table *table,
|
||||
}
|
||||
}
|
||||
|
||||
audit_log_nfcfg(table->name, table->af, private->number,
|
||||
!private->number ? AUDIT_XT_OP_REGISTER :
|
||||
AUDIT_XT_OP_REPLACE,
|
||||
GFP_KERNEL);
|
||||
return private;
|
||||
}
|
||||
|
||||
struct xt_table_info *
|
||||
xt_replace_table(struct xt_table *table, unsigned int num_counters,
|
||||
struct xt_table_info *newinfo,
|
||||
int *error)
|
||||
{
|
||||
struct xt_table_info *private;
|
||||
|
||||
private = do_replace_table(table, num_counters, newinfo, error);
|
||||
if (private)
|
||||
audit_log_nfcfg(table->name, table->af, private->number,
|
||||
AUDIT_XT_OP_REPLACE,
|
||||
GFP_KERNEL);
|
||||
|
||||
return private;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xt_replace_table);
|
||||
|
||||
struct xt_table *xt_register_table(struct net *net,
|
||||
const struct xt_table *input_table,
|
||||
const struct nf_hook_ops *template_ops,
|
||||
struct xt_table_info *bootstrap,
|
||||
struct xt_table_info *newinfo)
|
||||
{
|
||||
struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
|
||||
struct xt_table *t, *table = NULL;
|
||||
struct nf_hook_ops *ops = NULL;
|
||||
struct xt_table_info *private;
|
||||
struct xt_table *t, *table;
|
||||
int ret;
|
||||
unsigned int num_ops;
|
||||
int ret = -EINVAL;
|
||||
|
||||
num_ops = hweight32(input_table->valid_hooks);
|
||||
if (num_ops == 0)
|
||||
goto out;
|
||||
|
||||
ret = -ENOMEM;
|
||||
if (template_ops) {
|
||||
ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
|
||||
if (!ops)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Don't add one object to multiple lists. */
|
||||
table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
|
||||
if (!table) {
|
||||
ret = -ENOMEM;
|
||||
if (!table)
|
||||
goto out;
|
||||
}
|
||||
|
||||
mutex_lock(&xt[table->af].mutex);
|
||||
/* Don't autoload: we'd eat our tail... */
|
||||
@@ -1568,7 +1593,7 @@ struct xt_table *xt_register_table(struct net *net,
|
||||
/* Simplifies replace_table code. */
|
||||
table->private = bootstrap;
|
||||
|
||||
if (!xt_replace_table(table, 0, newinfo, &ret))
|
||||
if (!do_replace_table(table, 0, newinfo, &ret))
|
||||
goto unlock;
|
||||
|
||||
private = table->private;
|
||||
@@ -1577,34 +1602,122 @@ struct xt_table *xt_register_table(struct net *net,
|
||||
/* save number of initial entries */
|
||||
private->initial_entries = private->number;
|
||||
|
||||
if (ops) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < num_ops; i++)
|
||||
ops[i].priv = table;
|
||||
|
||||
ret = nf_register_net_hooks(net, ops, num_ops);
|
||||
if (ret != 0) {
|
||||
mutex_unlock(&xt[table->af].mutex);
|
||||
/* nf_register_net_hooks() might have published a
|
||||
* base chain before internal error unwind.
|
||||
*/
|
||||
synchronize_rcu();
|
||||
goto out;
|
||||
}
|
||||
|
||||
table->ops = ops;
|
||||
}
|
||||
|
||||
audit_log_nfcfg(table->name, table->af, private->number,
|
||||
AUDIT_XT_OP_REGISTER, GFP_KERNEL);
|
||||
|
||||
list_add(&table->list, &xt_net->tables[table->af]);
|
||||
mutex_unlock(&xt[table->af].mutex);
|
||||
return table;
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&xt[table->af].mutex);
|
||||
kfree(table);
|
||||
out:
|
||||
kfree(table);
|
||||
kfree(ops);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xt_register_table);
|
||||
|
||||
void *xt_unregister_table(struct xt_table *table)
|
||||
/**
|
||||
* xt_unregister_table_pre_exit - pre-shutdown unregister of a table
|
||||
* @net: network namespace
|
||||
* @af: address family (e.g., NFPROTO_IPV4, NFPROTO_IPV6)
|
||||
* @name: name of the table to unregister
|
||||
*
|
||||
* Unregisters the specified netfilter table from the given network namespace
|
||||
* and also unregisters the hooks from netfilter core: no new packets will be
|
||||
* processed.
|
||||
*
|
||||
* This must be called prior to xt_unregister_table_exit() from the pernet
|
||||
* .pre_exit callback. After this call, the table is no longer visible to
|
||||
* the get/setsockopt path. In case of rmmod, module exit path must have
|
||||
* called xt_unregister_template() prior to unregistering pernet ops to
|
||||
* prevent re-instantiation of the table.
|
||||
*
|
||||
* See also: xt_unregister_table_exit()
|
||||
*/
|
||||
void xt_unregister_table_pre_exit(struct net *net, u8 af, const char *name)
|
||||
{
|
||||
struct xt_table_info *private;
|
||||
struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
|
||||
struct xt_table *t;
|
||||
|
||||
mutex_lock(&xt[table->af].mutex);
|
||||
private = table->private;
|
||||
list_del(&table->list);
|
||||
mutex_unlock(&xt[table->af].mutex);
|
||||
audit_log_nfcfg(table->name, table->af, private->number,
|
||||
AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
|
||||
kfree(table->ops);
|
||||
kfree(table);
|
||||
mutex_lock(&xt[af].mutex);
|
||||
list_for_each_entry(t, &xt_net->tables[af], list) {
|
||||
if (strcmp(t->name, name) == 0) {
|
||||
list_move(&t->list, &xt_net->dead_tables[af]);
|
||||
mutex_unlock(&xt[af].mutex);
|
||||
|
||||
return private;
|
||||
if (t->ops) /* nat table registers with nat core, t->ops is NULL. */
|
||||
nf_unregister_net_hooks(net, t->ops, hweight32(t->valid_hooks));
|
||||
return;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&xt[af].mutex);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xt_unregister_table);
|
||||
EXPORT_SYMBOL(xt_unregister_table_pre_exit);
|
||||
|
||||
/**
|
||||
* xt_unregister_table_exit - remove a table during namespace teardown
|
||||
* @net: the network namespace from which to unregister the table
|
||||
* @af: address family (e.g., NFPROTO_IPV4, NFPROTO_IPV6)
|
||||
* @name: name of the table to unregister
|
||||
*
|
||||
* Completes the unregister process for a table. This must be called from
|
||||
* the pernet ops .exit callback. This is the second stage after
|
||||
* xt_unregister_table_pre_exit().
|
||||
*
|
||||
* pair with xt_unregister_table_pre_exit() during namespace shutdown.
|
||||
*
|
||||
* Return: the unregistered table or NULL if the table was never
|
||||
* instantiated. The caller needs to kfree() the table after it
|
||||
* has removed the family specific matches/targets.
|
||||
*/
|
||||
struct xt_table *xt_unregister_table_exit(struct net *net, u8 af, const char *name)
|
||||
{
|
||||
struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
|
||||
struct xt_table *table;
|
||||
|
||||
mutex_lock(&xt[af].mutex);
|
||||
list_for_each_entry(table, &xt_net->dead_tables[af], list) {
|
||||
struct nf_hook_ops *ops = NULL;
|
||||
|
||||
if (strcmp(table->name, name) != 0)
|
||||
continue;
|
||||
|
||||
list_del(&table->list);
|
||||
|
||||
audit_log_nfcfg(table->name, table->af, table->private->number,
|
||||
AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
|
||||
swap(table->ops, ops);
|
||||
mutex_unlock(&xt[af].mutex);
|
||||
|
||||
kfree(ops);
|
||||
return table;
|
||||
}
|
||||
mutex_unlock(&xt[af].mutex);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xt_unregister_table_exit);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
@@ -2051,8 +2164,10 @@ static int __net_init xt_net_init(struct net *net)
|
||||
struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < NFPROTO_NUMPROTO; i++)
|
||||
for (i = 0; i < NFPROTO_NUMPROTO; i++) {
|
||||
INIT_LIST_HEAD(&xt_net->tables[i]);
|
||||
INIT_LIST_HEAD(&xt_net->dead_tables[i]);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -2061,8 +2176,10 @@ static void __net_exit xt_net_exit(struct net *net)
|
||||
struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < NFPROTO_NUMPROTO; i++)
|
||||
for (i = 0; i < NFPROTO_NUMPROTO; i++) {
|
||||
WARN_ON_ONCE(!list_empty(&xt_net->tables[i]));
|
||||
WARN_ON_ONCE(!list_empty(&xt_net->dead_tables[i]));
|
||||
}
|
||||
}
|
||||
|
||||
static struct pernet_operations xt_net_ops = {
|
||||
|
||||
Reference in New Issue
Block a user