/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/refcount.h>

#include <net/sock.h>
struct request_sock_ops {
	int		family;
	unsigned int	obj_size;
	struct kmem_cache *slab;
	char		*slab_name;
	int		(*rtx_syn_ack)(const struct sock *sk,
				       struct request_sock *req);
	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(const struct sock *sk,
				      struct sk_buff *skb);
	void		(*destructor)(struct request_sock *req);
	void		(*syn_ack_timeout)(const struct request_sock *req);
};

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
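
/*
 * Illustrative sketch: a protocol provides one ops table per address
 * family, e.g. roughly how TCP wires it up in net/ipv4/tcp_ipv4.c:
 *
 *	struct request_sock_ops tcp_request_sock_ops __read_mostly = {
 *		.family		= PF_INET,
 *		.obj_size	= sizeof(struct tcp_request_sock),
 *		.rtx_syn_ack	= tcp_rtx_synack,
 *		.send_ack	= tcp_v4_reqsk_send_ack,
 *		.destructor	= tcp_v4_reqsk_destructor,
 *		.send_reset	= tcp_v4_send_reset,
 *		.syn_ack_timeout = tcp_syn_ack_timeout,
 *	};
 */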
/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct sock_common		__req_common;
#define rsk_refcnt			__req_common.skc_refcnt
#define rsk_hash			__req_common.skc_hash
#define rsk_listener			__req_common.skc_listener
#define rsk_window_clamp		__req_common.skc_window_clamp
#define rsk_rcv_wnd			__req_common.skc_rcv_wnd

	struct request_sock		*dl_next;
	u16				mss;
	u8				num_retrans; /* number of retransmits */
	u8				syncookie:1; /* syncookie: encode tcpopts in timestamp */
	u8				num_timeout:7; /* number of timeouts */
	struct timer_list		rsk_timer;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
	struct saved_syn		*saved_syn;
};
static inline struct request_sock *inet_reqsk(const struct sock *sk)
{
	return (struct request_sock *)sk;
}

static inline struct sock *req_to_sk(struct request_sock *req)
{
	return (struct sock *)req;
}
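
/*
 * These casts are valid because struct request_sock, like struct sock,
 * starts with a struct sock_common; a request_sock can therefore stand
 * in for a struct sock in hash tables and lookups while the connection
 * request is still pending.
 */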
/**
 * skb_steal_sock - steal a socket from an sk_buff
 * @skb: sk_buff to steal the socket from
 * @refcounted: is set to true if the socket is reference-counted
 * @prefetched: is set to true if the socket was assigned from bpf
 */
static inline struct sock *skb_steal_sock(struct sk_buff *skb,
					  bool *refcounted, bool *prefetched)
{
	struct sock *sk = skb->sk;

	if (!sk) {
		*prefetched = false;
		*refcounted = false;
		return NULL;
	}

	*prefetched = skb_sk_is_prefetched(skb);
	if (*prefetched) {
#if IS_ENABLED(CONFIG_SYN_COOKIES)
		if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) {
			struct request_sock *req = inet_reqsk(sk);

			*refcounted = false;
			sk = req->rsk_listener;
			req->rsk_listener = NULL;
			return sk;
		}
#endif
		*refcounted = sk_is_refcounted(sk);
	} else {
		*refcounted = true;
	}

	skb->destructor = NULL;
	skb->sk = NULL;
	return sk;
}
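
/*
 * Illustrative use (sketch): a receive-path caller takes ownership of the
 * stolen socket and must drop the reference itself when told to:
 *
 *	bool refcounted, prefetched;
 *	struct sock *sk = skb_steal_sock(skb, &refcounted, &prefetched);
 *
 *	if (sk) {
 *		... deliver skb to sk ...
 *		if (refcounted)
 *			sock_put(sk);
 *	}
 */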
static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
	    bool attach_listener)
{
	struct request_sock *req;

	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!req)
		return NULL;
	req->rsk_listener = NULL;
	if (attach_listener) {
		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
			kmem_cache_free(ops->slab, req);
			return NULL;
		}
		req->rsk_listener = sk_listener;
	}
	req->rsk_ops = ops;
	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
	sk_node_init(&req_to_sk(req)->sk_node);
	sk_tx_queue_clear(req_to_sk(req));
	req->saved_syn = NULL;
	req->num_timeout = 0;
	req->num_retrans = 0;
	req->sk = NULL;
	refcount_set(&req->rsk_refcnt, 0);

	return req;
}
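
/*
 * Note: the refcount is deliberately left at zero here. The caller is
 * expected to raise it once the request is fully initialized and about
 * to be published (TCP, for instance, sets it when the request is hashed
 * into the listener's queue), so a half-built req is never visible with
 * a live refcount.
 */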
static inline void __reqsk_free(struct request_sock *req)
{
	req->rsk_ops->destructor(req);
	if (req->rsk_listener)
		sock_put(req->rsk_listener);
	kfree(req->saved_syn);
	kmem_cache_free(req->rsk_ops->slab, req);
}
static inline void reqsk_free(struct request_sock *req)
{
	WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
	__reqsk_free(req);
}

static inline void reqsk_put(struct request_sock *req)
{
	if (refcount_dec_and_test(&req->rsk_refcnt))
		reqsk_free(req);
}
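
/*
 * Lifecycle sketch: a request that was never published is freed directly
 * with reqsk_free() while its refcount is still zero; once published and
 * refcounted, each holder drops its reference with reqsk_put(), and the
 * final put frees the request.
 */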
/*
 * For a TCP Fast Open listener -
 *	lock - protects the access to all the reqsk, which is co-owned by
 *		the listener and the child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 *	XXX (TFO) - ideally these fields could be made part of the
 *	"listen_sock" structure above, but there is some implementation
 *	difficulty: listen_sock is part of request_sock_queue and hence is
 *	freed when a listener is stopped, while TFO-related fields may
 *	continue to be accessed even after a listener is closed, until its
 *	sk_refcnt drops to 0, implying no more outstanding TFO reqs. One
 *	solution is to keep listen_opt around until sk_refcnt drops to 0,
 *	but there is some other complexity that needs to be resolved, e.g.,
 *	a listener can be disabled temporarily through
 *	shutdown()->tcp_disconnect(), and re-enabled later.
 */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attack.
						 */
	spinlock_t	lock;
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */

	struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */
};
/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 *
 */
struct request_sock_queue {
	spinlock_t		rskq_lock;
	u8			rskq_defer_accept;

	u32			synflood_warned;
	atomic_t		qlen;
	atomic_t		young;

	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
					     * if TFO is enabled.
					     */
};
void reqsk_queue_alloc(struct request_sock_queue *queue);

void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset);
static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
	return READ_ONCE(queue->rskq_accept_head) == NULL;
}
static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
						      struct sock *parent)
{
	struct request_sock *req;

	spin_lock_bh(&queue->rskq_lock);
	req = queue->rskq_accept_head;
	if (req) {
		sk_acceptq_removed(parent);
		WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
		if (queue->rskq_accept_head == NULL)
			queue->rskq_accept_tail = NULL;
	}
	spin_unlock_bh(&queue->rskq_lock);
	return req;
}
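
/*
 * Illustrative sketch: accept()-style callers drain the queue roughly
 * like this (assuming icsk_accept_queue is this queue and the child
 * socket was stashed in req->sk when the handshake completed):
 *
 *	req = reqsk_queue_remove(&icsk->icsk_accept_queue, sk);
 *	if (req) {
 *		newsk = req->sk;
 *		reqsk_put(req);
 *	}
 */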
static inline void reqsk_queue_removed(struct request_sock_queue *queue,
				       const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_dec(&queue->young);
	atomic_dec(&queue->qlen);
}

static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
	atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->qlen);
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->young);
}
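
/*
 * "young" counts requests whose SYN-ACK has never been retransmitted
 * (num_timeout == 0). Listeners compare qlen and young against their
 * backlog to decide when to drop a new SYN, e.g. (sketch, simplified
 * from how TCP uses these helpers):
 *
 *	if (acceptq_is_full && reqsk_queue_len_young(queue) > 1)
 *		drop the incoming SYN;
 */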

#endif /* _REQUEST_SOCK_H */