/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into a tree,
 *    and the stack is just a path from the root to the current pointer.
 *
 * Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *	Alan Cox	07 Sept 1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket while we are in the middle of the
 *		sweep phase. Fix: revert the logic wrt MARKED. Mark
 *		everything upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had a slightly different problem here:
 *		an extra fput() when we passed the descriptor via such a
 *		socket and then closed it (the descriptor). That would
 *		happen on each unix_gc() until the accept(). Since the
 *		struct file in question would go to the free list and
 *		might be reused... That might be the reason for random
 *		oopses on filp_close() in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of the stack. Now we keep the
 *		tree with its root in a dummy node + a pointer (gc_current)
 *		to one of the nodes. The stack is represented as the path
 *		from gc_current to the dummy. Unmark now means "add to
 *		tree"; push == "make it a son of gc_current"; pop == "move
 *		gc_current to parent". We keep only pointers to parents
 *		(->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues
 *		scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle-collecting algorithm. This should
 *		solve several problems with the previous code, like being
 *		racy wrt receive and holding up unrelated socket operations.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

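/* gc_inflight_list:	all AF_UNIX sockets that currently have at least
 *			one in-flight descriptor reference.
 * gc_candidates:	sockets under consideration by the running
 *			collection as potentially unreachable.
 * unix_tot_inflight:	total number of AF_UNIX descriptors in flight;
 *			wait_for_unix_gc() uses it to decide when to
 *			force a collection.
 */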
static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

unsigned int unix_tot_inflight;

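/* Return the AF_UNIX sock behind @filp, or NULL if @filp is not an
 * AF_UNIX socket. Descriptors opened with O_PATH (FMODE_PATH) are
 * excluded, as they provide no usable socket reference.
 */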
struct sock *unix_get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = file_inode(filp);

	/* Socket ? */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		struct sock *s = sock->sk;

		/* PF_UNIX ? */
		if (s && sock->ops && sock->ops->family == PF_UNIX)
			u_sock = s;
	}
	return u_sock;
}

/* Keep track of the in-flight count for a file descriptor if it
 * refers to an AF_UNIX socket.
 */

void unix_inflight(struct user_struct *user, struct file *fp)
{
	struct sock *s = unix_get_socket(fp);

	spin_lock(&unix_gc_lock);

	if (s) {
		struct unix_sock *u = unix_sk(s);

		if (atomic_long_inc_return(&u->inflight) == 1) {
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			BUG_ON(list_empty(&u->link));
		}
		unix_tot_inflight++;
	}
	user->unix_inflight++;
	spin_unlock(&unix_gc_lock);
}

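/* Undo unix_inflight(): drop the in-flight count for a received (or
 * discarded) descriptor, and take the socket off gc_inflight_list when
 * its last in-flight reference goes away.
 */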
void unix_notinflight(struct user_struct *user, struct file *fp)
{
	struct sock *s = unix_get_socket(fp);

	spin_lock(&unix_gc_lock);

	if (s) {
		struct unix_sock *u = unix_sk(s);

		BUG_ON(!atomic_long_read(&u->inflight));
		BUG_ON(list_empty(&u->link));

		if (atomic_long_dec_and_test(&u->inflight))
			list_del_init(&u->link);
		unix_tot_inflight--;
	}
	user->unix_inflight--;
	spin_unlock(&unix_gc_lock);
}

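/* Walk the receive queue of @x and apply @func to every in-flight GC
 * candidate referenced by a queued SCM_RIGHTS descriptor. If @hitlist
 * is non-NULL, any skb referencing at least one candidate is unlinked
 * from the queue and collected on @hitlist for later purging.
 */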
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket this fd refers to, if it
				 * is indeed a socket.
				 */
				struct sock *sk = unix_get_socket(*fp++);

				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/* Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
						hit = true;

						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

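/* Like scan_inflight(), but for a listening socket also scan the
 * receive queues of its embryo sockets (connect()'ed but not yet
 * accept()'ed), since SCM_RIGHTS skbs may already be queued there.
 */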
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

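/* Callbacks passed to scan_children() by the collection passes in
 * unix_gc() below.
 */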
static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

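/* Force a collection when the number of in-flight sockets has grown
 * excessive, and wait for any in-progress collection to finish before
 * returning.
 */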
void wait_for_unix_gc(void)
{
	/* If number of inflight sockets is insane,
	 * force a garbage collect right now.
	 */
	if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
		unix_gc();
	wait_event(unix_gc_wait, gc_in_progress == false);
}

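/* The collection algorithm, in outline:
 *
 *  1. Candidate selection: an in-flight socket is a candidate when all
 *     of its file references are in-flight ones (total_refs ==
 *     inflight_refs), i.e. no user holds an open descriptor to it.
 *  2. For every candidate, decrement the in-flight count of each
 *     candidate it holds references to. A candidate left with a
 *     positive count is referenced from outside the candidate set and
 *     is therefore reachable; restore it, and recursively restore its
 *     children.
 *  3. Whatever remains is referenced only from within the candidate
 *     set: unreachable cycles. Collect the skbs carrying those
 *     references and purge them.
 */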
/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	gc_in_progress = true;
	/* First, select candidates for garbage collection. Only
	 * in-flight sockets are considered, and from those only the
	 * ones which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference. Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues. Other, non-candidate sockets _can_ be
	 * added to a queue, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}

	/* Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for those children of the candidates
	 * which still have remaining references. Do this recursively,
	 * so that only the ones forming cyclic references remain.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* Now gc_candidates contains only garbage. Restore the original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
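	/* Restoring the counters first matters: purging the hitlist
	 * frees the skbs, which ends up calling unix_notinflight() for
	 * each carried descriptor and decrementing the counters once
	 * more; without the restore they would hit the BUG_ON() there.
	 */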
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle. Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);

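	/* The purge must run without unix_gc_lock held: freeing the
	 * skbs invokes unix_notinflight() for each descriptor they
	 * carry, and that takes unix_gc_lock again.
	 */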
	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));
	gc_in_progress = false;
	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}