mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
netfilter: nfnetlink_queue: nfqnl_instance GFP_ATOMIC -> GFP_KERNEL_ACCOUNT allocation
Currently, instance_create() uses GFP_ATOMIC because it is called while holding the instances_lock spinlock. This makes the allocation more likely to fail under memory pressure.

Refactor nfqnl_recv_config() to drop the RCU read lock after instance_lookup() and the peer_portid check. A socket cannot simultaneously send a message and close, so the queue owned by the sending socket cannot be destroyed while its CONFIG message is being processed. This allows instance_create() to allocate with GFP_KERNEL_ACCOUNT before taking the spinlock.

Suggested-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Scott Mitchell <scott.k.mitch1@gmail.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Committed by: Florian Westphal
Parent: d00453b6e3
Commit: a4400a5b34
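A minimal kernel-style sketch of the ordering the patch establishes (do the sleepable, memcg-accounted allocation before taking the spinlock, then repeat the duplicate check once the lock is held). This is not the upstream code: struct foo_entry, foo_list, foo_lock and foo_create() are made-up names used only for illustration.

#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_entry {
	struct list_head node;
	u16 id;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

/* Caller must hold foo_lock (or otherwise exclude concurrent writers). */
static struct foo_entry *foo_lookup(u16 id)
{
	struct foo_entry *e;

	list_for_each_entry(e, &foo_list, node)
		if (e->id == id)
			return e;
	return NULL;
}

static struct foo_entry *foo_create(u16 id)
{
	struct foo_entry *e;
	int err;

	/* Sleepable, memcg-accounted allocation: no spinlock held yet. */
	e = kzalloc(sizeof(*e), GFP_KERNEL_ACCOUNT);
	if (!e)
		return ERR_PTR(-ENOMEM);
	e->id = id;

	/* The existence check must be repeated under the lock, because
	 * another creator may have raced in while we were allocating.
	 */
	spin_lock(&foo_lock);
	if (foo_lookup(id)) {
		err = -EEXIST;
		goto out_unlock;
	}
	list_add(&e->node, &foo_list);
	spin_unlock(&foo_lock);

	return e;

out_unlock:
	spin_unlock(&foo_lock);
	kfree(e);
	return ERR_PTR(err);
}

GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT) lets the allocator sleep and reclaim, and charges the memory to the requesting task's memory cgroup; the cost of the reordering is that the lookup is redone under the lock and the unused allocation is freed on the -EEXIST path, which is the shape the patched instance_create() takes below.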
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -121,17 +121,9 @@ instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
 	unsigned int h;
 	int err;
 
-	spin_lock(&q->instances_lock);
-	if (instance_lookup(q, queue_num)) {
-		err = -EEXIST;
-		goto out_unlock;
-	}
-
-	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
-	if (!inst) {
-		err = -ENOMEM;
-		goto out_unlock;
-	}
+	inst = kzalloc(sizeof(*inst), GFP_KERNEL_ACCOUNT);
+	if (!inst)
+		return ERR_PTR(-ENOMEM);
 
 	inst->queue_num = queue_num;
 	inst->peer_portid = portid;
@@ -141,9 +133,15 @@ instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
 	spin_lock_init(&inst->lock);
 	INIT_LIST_HEAD(&inst->queue_list);
 
+	spin_lock(&q->instances_lock);
+	if (instance_lookup(q, queue_num)) {
+		err = -EEXIST;
+		goto out_unlock;
+	}
+
 	if (!try_module_get(THIS_MODULE)) {
 		err = -EAGAIN;
-		goto out_free;
+		goto out_unlock;
 	}
 
 	h = instance_hashfn(queue_num);
@@ -153,10 +151,9 @@ instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
 
 	return inst;
 
-out_free:
-	kfree(inst);
 out_unlock:
 	spin_unlock(&q->instances_lock);
+	kfree(inst);
 	return ERR_PTR(err);
 }
 
@@ -1498,7 +1495,8 @@ static int nfqnl_recv_config(struct sk_buff *skb, const struct nfnl_info *info,
 	struct nfqnl_msg_config_cmd *cmd = NULL;
 	struct nfqnl_instance *queue;
 	__u32 flags = 0, mask = 0;
-	int ret = 0;
+
+	WARN_ON_ONCE(!lockdep_nfnl_is_held(NFNL_SUBSYS_QUEUE));
 
 	if (nfqa[NFQA_CFG_CMD]) {
 		cmd = nla_data(nfqa[NFQA_CFG_CMD]);
@@ -1544,47 +1542,44 @@ static int nfqnl_recv_config(struct sk_buff *skb, const struct nfnl_info *info,
 		}
 	}
 
+	/* Lookup queue under RCU. After peer_portid check (or for new queue
+	 * in BIND case), the queue is owned by the socket sending this message.
+	 * A socket cannot simultaneously send a message and close, so while
+	 * processing this CONFIG message, nfqnl_rcv_nl_event() (triggered by
+	 * socket close) cannot destroy this queue. Safe to use without RCU.
+	 */
 	rcu_read_lock();
 	queue = instance_lookup(q, queue_num);
 	if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
-		ret = -EPERM;
-		goto err_out_unlock;
+		rcu_read_unlock();
+		return -EPERM;
 	}
+	rcu_read_unlock();
 
 	if (cmd != NULL) {
 		switch (cmd->command) {
 		case NFQNL_CFG_CMD_BIND:
-			if (queue) {
-				ret = -EBUSY;
-				goto err_out_unlock;
-			}
-			queue = instance_create(q, queue_num,
-						NETLINK_CB(skb).portid);
-			if (IS_ERR(queue)) {
-				ret = PTR_ERR(queue);
-				goto err_out_unlock;
-			}
+			if (queue)
+				return -EBUSY;
+			queue = instance_create(q, queue_num, NETLINK_CB(skb).portid);
+			if (IS_ERR(queue))
+				return PTR_ERR(queue);
 			break;
 		case NFQNL_CFG_CMD_UNBIND:
-			if (!queue) {
-				ret = -ENODEV;
-				goto err_out_unlock;
-			}
+			if (!queue)
+				return -ENODEV;
 			instance_destroy(q, queue);
-			goto err_out_unlock;
+			return 0;
 		case NFQNL_CFG_CMD_PF_BIND:
 		case NFQNL_CFG_CMD_PF_UNBIND:
 			break;
 		default:
-			ret = -ENOTSUPP;
-			goto err_out_unlock;
+			return -EOPNOTSUPP;
 		}
 	}
 
-	if (!queue) {
-		ret = -ENODEV;
-		goto err_out_unlock;
-	}
+	if (!queue)
+		return -ENODEV;
 
 	if (nfqa[NFQA_CFG_PARAMS]) {
 		struct nfqnl_msg_config_params *params =
@@ -1609,9 +1604,7 @@ static int nfqnl_recv_config(struct sk_buff *skb, const struct nfnl_info *info,
 		spin_unlock_bh(&queue->lock);
 	}
 
-err_out_unlock:
-	rcu_read_unlock();
-	return ret;
+	return 0;
 }
 
 static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {