// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into a tree,
 *    and the stack is just a path from the root to the current pointer.
 *
 * Future optimizations:
 *
 *  - don't just push the entire root set; process in place
 *
 * Fixes:
 *	Alan Cox	07 Sept 1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move the SCM_RIGHTS ones into a separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket while we are in the middle of the
 *		sweep phase. Fix: revert the logic wrt MARKED. Mark
 *		everything upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had a slightly different problem here:
 *		an extra fput() when we passed the descriptor via such a
 *		socket and then closed it (the descriptor). That would
 *		happen on each unix_gc() until the accept(). Since the
 *		struct file in question would go to the free list and
 *		might be reused... That might be the reason for random
 *		oopses on filp_close() in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of the stack. Now we keep the
 *		tree with the root in a dummy node + a pointer (gc_current)
 *		to one of the nodes. The stack is represented as the path
 *		from gc_current to the dummy. Unmark now means "add to
 *		tree". Push == "make it a son of gc_current". Pop == "move
 *		gc_current to parent". We keep only pointers to parents
 *		(->gc_tree).
 *
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues
 *		scanning.
 *
 *	Miklos Szeredi	25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being
 *		racy wrt receive and holding up unrelated socket operations.
 */
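
/*
 * For reference, how such a cycle forms in the first place.  A
 * userspace sketch (not part of this file; send_fd(sock, fd) is an
 * illustrative stand-in for the usual sendmsg() + SCM_RIGHTS cmsg
 * boilerplate):
 *
 *	int sv[2];
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *	send_fd(sv[0], sv[0]);	// sv[0]'s file is now queued on sv[1]
 *	send_fd(sv[1], sv[1]);	// sv[1]'s file is now queued on sv[0]
 *	close(sv[0]);
 *	close(sv[1]);
 *
 * After both close() calls no process can receive either message, yet
 * each socket's receive queue still holds a reference to the other's
 * file, so neither refcount can ever drop to zero on its own.  Only
 * this garbage collector can reclaim the pair.
 */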

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

struct unix_sock *unix_get_socket(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	/* Socket ? */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		const struct proto_ops *ops;
		struct sock *sk = sock->sk;

		ops = READ_ONCE(sock->ops);

		/* PF_UNIX ? */
		if (sk && ops && ops->family == PF_UNIX)
			return unix_sk(sk);
	}

	return NULL;
}

DEFINE_SPINLOCK(unix_gc_lock);
unsigned int unix_tot_inflight;
static LIST_HEAD(gc_candidates);
static LIST_HEAD(gc_inflight_list);

/* Keep track of the number of times an AF_UNIX socket's file
 * descriptor is in flight.
 */
void unix_inflight(struct user_struct *user, struct file *filp)
{
	struct unix_sock *u = unix_get_socket(filp);

	spin_lock(&unix_gc_lock);

	if (u) {
		if (!u->inflight) {
			WARN_ON_ONCE(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			WARN_ON_ONCE(list_empty(&u->link));
		}
		u->inflight++;

		/* Paired with READ_ONCE() in wait_for_unix_gc() */
		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
	}

	WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);

	spin_unlock(&unix_gc_lock);
}

void unix_notinflight(struct user_struct *user, struct file *filp)
{
	struct unix_sock *u = unix_get_socket(filp);

	spin_lock(&unix_gc_lock);

	if (u) {
		WARN_ON_ONCE(!u->inflight);
		WARN_ON_ONCE(list_empty(&u->link));

		u->inflight--;
		if (!u->inflight)
			list_del_init(&u->link);

		/* Paired with READ_ONCE() in wait_for_unix_gc() */
		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
	}

	WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);

	spin_unlock(&unix_gc_lock);
}
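
/*
 * A minimal sketch of how the two helpers above pair up around an
 * SCM_RIGHTS transfer.  attach_fds()/detach_fds() are illustrative
 * stand-ins, not the actual af_unix.c call sites:
 *
 *	static void attach_fds(struct scm_fp_list *fpl)
 *	{
 *		int i;
 *
 *		for (i = 0; i < fpl->count; i++)
 *			unix_inflight(fpl->user, fpl->fp[i]);
 *	}
 *
 *	static void detach_fds(struct scm_fp_list *fpl)
 *	{
 *		int i;
 *
 *		for (i = 0; i < fpl->count; i++)
 *			unix_notinflight(fpl->user, fpl->fp[i]);
 *	}
 *
 * Every unix_inflight() taken while a message sits in a receive queue
 * must be balanced by exactly one unix_notinflight() when the message
 * is received or destroyed; otherwise unix_tot_inflight drifts and the
 * candidate selection in __unix_gc() is skewed.
 */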

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;

			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket the fd matches if it indeed does so */
				struct unix_sock *u = unix_get_socket(*fp++);

				/* Ignore non-candidates, they could have been added
				 * to the queues after starting the garbage collection
				 */
				if (u && test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
					hit = true;

					func(u);
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}
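
/*
 * The contract assumed by the callers below: scan_inflight() walks a
 * single receive queue, applies func() to every gc-candidate socket
 * referenced by a queued SCM_RIGHTS message, and, when a hitlist is
 * supplied, steals the offending skbs so the caller can purge them
 * after dropping the locks.
 */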

static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			WARN_ON_ONCE(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

static void dec_inflight(struct unix_sock *usk)
{
	usk->inflight--;
}

static void inc_inflight(struct unix_sock *usk)
{
	usk->inflight++;
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	u->inflight++;

	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over.
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}
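
/*
 * A worked example of the counting scheme, with made-up numbers:
 * sockets A and B each hold the other's fd in flight and have been
 * closed by userspace, so file_count == inflight == 1 for both and
 * both become candidates.  The dec_inflight() pass drops both counts
 * to 0; nothing outside the cycle re-increments them, so neither
 * leaves gc_candidates and both end up on the hitlist.  If a third,
 * externally referenced socket also held B's fd in flight, B would
 * start at inflight == 2, still be at 1 after the decrement pass, and
 * be rescued into not_cycle_list by the cursor loop in __unix_gc().
 */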

static bool gc_in_progress;

static void __unix_gc(struct work_struct *work)
{
	struct sk_buff_head hitlist;
	struct unix_sock *u, *next;
	LIST_HEAD(not_cycle_list);
	struct list_head cursor;

	spin_lock(&unix_gc_lock);

	/* First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only the
	 * ones which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non-candidate sockets _can_ be
	 * added to a queue, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;

		total_refs = file_count(u->sk.sk_socket->file);

		WARN_ON_ONCE(!u->inflight);
		WARN_ON_ONCE(total_refs < u->inflight);
		if (total_refs == u->inflight) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}
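
	/*
	 * To make the invariant concrete, with made-up numbers: a
	 * socket whose file has 3 references of which 2 are in flight
	 * is still reachable from some process's fd table (3 - 2 = 1
	 * external reference) and is skipped.  Only when every
	 * remaining reference comes from an in-flight SCM_RIGHTS
	 * message (total_refs == inflight) can the socket be garbage.
	 */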

	/* Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates which
	 * still have remaining references.  Do this recursively, so
	 * that only those which form cyclic references remain.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (u->inflight) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* Now gc_candidates contains only garbage.  Restore the
	 * original inflight counters for these as well, and remove
	 * the skbuffs which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link) {
		scan_children(&u->sk, inc_inflight, &hitlist);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (u->oob_skb) {
			kfree_skb(u->oob_skb);
			u->oob_skb = NULL;
		}
#endif
	}

	/* not_cycle_list contains those sockets which do not make up
	 * a cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);
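
	/*
	 * Note: the hitlist purge below deliberately happens after
	 * unix_gc_lock is dropped.  Freeing these skbs releases their
	 * SCM_RIGHTS file references, which leads back into
	 * unix_notinflight(), and that takes unix_gc_lock itself.
	 */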
	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	WARN_ON_ONCE(!list_empty(&gc_candidates));

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, false);

	spin_unlock(&unix_gc_lock);
}

static DECLARE_WORK(unix_gc_work, __unix_gc);

void unix_gc(void)
{
	WRITE_ONCE(gc_in_progress, true);
	queue_work(system_unbound_wq, &unix_gc_work);
}

#define UNIX_INFLIGHT_TRIGGER_GC 16000
#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)
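
/* With SCM_MAX_FD at 253, UNIX_INFLIGHT_SANE_USER works out to
 * 253 * 8 = 2024 not-yet-received in-flight sockets per user before
 * a sender is made to wait for the garbage collector below.
 */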

void wait_for_unix_gc(struct scm_fp_list *fpl)
{
	/* If the number of in-flight sockets is insane, force a
	 * garbage collect right now.
	 *
	 * Paired with the WRITE_ONCE() in unix_inflight(),
	 * unix_notinflight(), and __unix_gc().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();

	/* Penalise users who want to send AF_UNIX sockets
	 * but whose sockets have not been received yet.
	 */
	if (!fpl || !fpl->count_unix ||
	    READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
		return;

	if (READ_ONCE(gc_in_progress))
		flush_work(&unix_gc_work);
}