af_unix: Don't use skb_recv_datagram() in unix_stream_read_skb().

unix_stream_read_skb() calls skb_recv_datagram() with MSG_DONTWAIT,
which is mostly equivalent to sock_error(sk) + skb_dequeue().
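
As a rough illustration of that equivalence, here is a minimal sketch of
the non-blocking path (the helper name recv_datagram_sketch is made up;
the real skb_recv_datagram() also handles MSG_PEEK, offsets, and the
queue lock internally):

    /* Simplified sketch of what skb_recv_datagram(sk, MSG_DONTWAIT, &err)
     * boils down to for this caller; not the real implementation.
     */
    static struct sk_buff *recv_datagram_sketch(struct sock *sk, int *err)
    {
            struct sk_buff *skb;

            *err = sock_error(sk);
            if (*err)
                    return NULL;

            skb = skb_dequeue(&sk->sk_receive_queue);
            if (!skb)
                    *err = -EAGAIN; /* queue empty and MSG_DONTWAIT set */

            return skb;
    }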

In the following patch, we will add a new field to cache the number
of bytes in the receive queue.  To avoid introducing atomic ops in
the fast path, we will reuse the receive queue lock to protect the
new field.
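
A hypothetical sketch of that plan (the field name inq_len and the
enqueue-side hook are assumptions for illustration, not part of this
patch):

    /* Assumed follow-up: keep a byte counter in sync with the receive
     * queue under queue->lock, so readers need no extra atomic ops.
     */
    spin_lock(&queue->lock);
    __skb_queue_tail(queue, skb);
    u->inq_len += skb->len; /* hypothetical field */
    spin_unlock(&queue->lock);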

As a preparation for the change, let's not use skb_recv_datagram()
in unix_stream_read_skb().

Note that sock_error() is now moved out of the u->iolock mutex, as
the mutex does not synchronise with the peer's close() at all.
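
That is consistent with how sock_error() works; roughly (condensed
here as sock_error_sketch, see include/net/sock.h):

    /* Roughly sock_error(): a racy fast-path check of sk->sk_err,
     * then an atomic read-and-clear.  sk_err is written by the peer
     * without taking this socket's iolock, so holding the mutex
     * around the read adds no synchronisation.
     */
    static inline int sock_error_sketch(struct sock *sk)
    {
            if (likely(data_race(!sk->sk_err)))
                    return 0;

            return -xchg(&sk->sk_err, 0);
    }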

Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250702223606.1054680-4-kuniyu@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

@@ -2786,6 +2786,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
 
 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
 {
+	struct sk_buff_head *queue = &sk->sk_receive_queue;
 	struct unix_sock *u = unix_sk(sk);
 	struct sk_buff *skb;
 	int err;
@@ -2793,30 +2794,34 @@ static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
 	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
 		return -ENOTCONN;
 
-	mutex_lock(&u->iolock);
-	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
-	mutex_unlock(&u->iolock);
-	if (!skb)
+	err = sock_error(sk);
+	if (err)
 		return err;
 
+	mutex_lock(&u->iolock);
+	spin_lock(&queue->lock);
+
+	skb = __skb_dequeue(queue);
+	if (!skb) {
+		spin_unlock(&queue->lock);
+		mutex_unlock(&u->iolock);
+		return -EAGAIN;
+	}
+
 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
-	if (unlikely(skb == READ_ONCE(u->oob_skb))) {
-		bool drop = false;
-
-		spin_lock(&sk->sk_receive_queue.lock);
-		if (likely(skb == u->oob_skb)) {
-			WRITE_ONCE(u->oob_skb, NULL);
-			drop = true;
-		}
-		spin_unlock(&sk->sk_receive_queue.lock);
-
-		if (drop) {
-			kfree_skb_reason(skb, SKB_DROP_REASON_UNIX_SKIP_OOB);
-			return -EAGAIN;
-		}
+	if (skb == u->oob_skb) {
+		WRITE_ONCE(u->oob_skb, NULL);
+
+		spin_unlock(&queue->lock);
+		mutex_unlock(&u->iolock);
+
+		kfree_skb_reason(skb, SKB_DROP_REASON_UNIX_SKIP_OOB);
+		return -EAGAIN;
 	}
 #endif
 
+	spin_unlock(&queue->lock);
+	mutex_unlock(&u->iolock);
+
 	return recv_actor(sk, skb);
 }
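
With the dequeue and the oob_skb comparison now performed under the
same queue->lock, the old re-check-under-lock pattern (the drop flag)
is no longer needed.  For context, recv_actor is the skb_read_actor_t
callback supplied by the ->read_skb caller (in practice the BPF
sockmap verdict path), and it takes ownership of the dequeued skb.
A hypothetical actor showing the contract:

    /* Hypothetical recv_actor: the callback owns the skb and must
     * consume or free it; its return value is propagated to the
     * ->read_skb caller.
     */
    static int count_bytes_actor(struct sock *sk, struct sk_buff *skb)
    {
            int len = skb->len;

            kfree_skb(skb);
            return len;
    }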