net: mark racy access on sk->sk_rcvbuf
Author: linke li <lilinke99@qq.com>
Thu, 21 Mar 2024 08:44:10 +0000 (16:44 +0800)
Committer: David S. Miller <davem@davemloft.net>
Mon, 25 Mar 2024 14:46:59 +0000 (14:46 +0000)
sk->sk_rcvbuf in __sock_queue_rcv_skb() and __sk_receive_skb() can be
changed by other threads. Mark this as benign using READ_ONCE().

This patch is aimed at reducing the number of benign races reported by
KCSAN in order to focus future debugging effort on harmful races.

Signed-off-by: linke li <lilinke99@qq.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/core/sock.c

index 43bf3818c19e829b47d3989d36e2e1b3bf985438..0963689a59506ac3309ad9a86d06b729948ad357 100644 (file)
@@ -482,7 +482,7 @@ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;
 
-       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
+       if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {
                atomic_inc(&sk->sk_drops);
                trace_sock_rcvqueue_full(sk, skb);
                return -ENOMEM;
@@ -552,7 +552,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
 
        skb->dev = NULL;
 
-       if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
+       if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) {
                atomic_inc(&sk->sk_drops);
                goto discard_and_relse;
        }