/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from installing them in the
 * per-namespace IDR and parking tasks using given ID's on a list.
 *
 * The task lists hanging off a struct pid are only changed with the
 * tasklist_lock write-acquired, and are only traversed with the
 * tasklist_lock at least read-acquired or under rcu_read_lock(), so
 * there's no additional SMP locking needed here.
 *
 * PIDs are handed out by a per-namespace IDR (see alloc_pid() below):
 * idr_alloc_cyclic() allocates IDs cyclically under pidmap_lock, so a
 * freed PID is not reused until the ID space wraps around. Lookup is a
 * plain IDR (radix tree) walk, and freeing is idr_remove(), again under
 * pidmap_lock.
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/proc_fs.h>
#include <linux/sched/task.h>
#include <linux/idr.h>

struct pid init_struct_pid = {
	.count		= ATOMIC_INIT(1),
	.level		= 0,
	.numbers	= { {
		.nr		= 0,
		.ns		= &init_pid_ns,
	}, }
};

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

/*
 * The per-namespace IDR backing the PID space starts out empty; its
 * internal nodes get allocated upon first use and are freed again when
 * the PIDs they cover are released. This way a low pid_max value does
 * not cause lots of memory to be allocated, but the scheme scales to
 * up to 4 million PIDs, runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = KREF_INIT(2),
	.idr = IDR_INIT(init_pid_ns.idr),
	.pid_allocated = PIDNS_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
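
/*
 * Illustrative interleaving (an editor's sketch, not part of the
 * original file) of the deadlock described above, assuming an irq
 * handler that takes tasklist_lock for reading and a plain
 * spin_lock() on pidmap_lock:
 *
 *	CPU 0					CPU 1
 *	-----					-----
 *	write_lock_irq(&tasklist_lock);
 *	detach_pid() -> free_pid():		spin_lock(&pidmap_lock);
 *	  spin_lock(&pidmap_lock);		<interrupt>
 *	  ... spins, CPU 1 holds it		  read_lock(&tasklist_lock);
 *						  ... spins, CPU 0 holds it
 *
 * Neither CPU can make progress, which is why every acquisition of
 * pidmap_lock below uses the _irq/_irqsave variants.
 */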
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		switch (--ns->pid_allocated) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->pid_allocated = 0;
			/* fall through */
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}

		idr_remove(&ns->idr, upid->nr);
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;

	for (i = ns->level; i >= 0; i--) {
		int pid_min = 1;

		idr_preload(GFP_KERNEL);
		spin_lock_irq(&pidmap_lock);

		/*
		 * init really needs pid 1, but after reaching the maximum
		 * wrap back to RESERVED_PIDS
		 */
		if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
			pid_min = RESERVED_PIDS;

		/*
		 * Store a null pointer so find_pid_ns does not find
		 * a partially initialized PID (see below).
		 */
		nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
				      pid_max, GFP_ATOMIC);
		spin_unlock_irq(&pidmap_lock);
		idr_preload_end();

		if (nr < 0) {
			retval = (nr == -ENOSPC) ? -EAGAIN : nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}
	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->pid_allocated & PIDNS_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		/* Make the PID visible to find_pid_ns. */
		idr_replace(&upid->ns->idr, pid, upid->nr);
		upid->ns->pid_allocated++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	spin_lock_irq(&pidmap_lock);
	while (++i <= ns->level)
		idr_remove(&ns->idr, (pid->numbers + i)->nr);

	/* On failure to allocate the first pid, reset the state */
	if (ns->pid_allocated == PIDNS_ADDING)
		idr_set_cursor(&ns->idr, 0);

	spin_unlock_irq(&pidmap_lock);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}
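
/*
 * Caller's view (an editor's sketch, not part of this file): the fork
 * path allocates the pid for a new task and must handle the ERR_PTR
 * convention used above, roughly:
 *
 *	pid = alloc_pid(p->nsproxy->pid_ns_for_children);
 *	if (IS_ERR(pid)) {
 *		retval = PTR_ERR(pid);
 *		goto cleanup;		// hypothetical error label
 *	}
 *	...
 *	free_pid(pid);			// on a later failure path
 */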
void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->pid_allocated &= ~PIDNS_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);
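
/*
 * Usage sketch (an editor's illustration, not part of this file):
 * the pid returned by either lookup is only stable while
 * rcu_read_lock() is held, unless a reference is taken:
 *
 *	rcu_read_lock();
 *	pid = find_vpid(nr);
 *	task = pid_task(pid, PIDTYPE_PID);	// may be NULL
 *	if (task)
 *		get_task_struct(task);
 *	rcu_read_unlock();
 */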
/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link = &task->pids[type];
	hlist_add_head_rcu(&link->node, &link->pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(nr);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	return task;
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(rcu_dereference(task->pids[type].pid));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);
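
/*
 * Reference pairing sketch (an editor's illustration): each *get*
 * helper above returns a counted reference that its caller must drop:
 *
 *	struct pid *pid = find_get_pid(nr);	// NULL or ref held
 *	if (pid) {
 *		use_pid(pid);			// hypothetical user
 *		put_pid(pid);			// drop the reference
 *	}
 */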
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);
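
/*
 * Translation example (an editor's sketch with made-up numbers): the
 * same struct pid yields a different pid_t depending on the observer's
 * namespace:
 *
 *	pid_nr_ns(pid, &init_pid_ns);	// e.g. 12345 in the initial ns
 *	pid_vnr(pid);			// e.g. 7 inside current's pid ns
 *
 * pid_nr_ns() returns 0 when the pid is not visible in the queried
 * namespace.
 */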
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID) {
			if (type == __PIDTYPE_TGID)
				type = PIDTYPE_PID;

			task = task->group_leader;
		}
		nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	return idr_get_next(&ns->idr, &nr);
}
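
/*
 * Iteration sketch (an editor's illustration, not part of this file):
 * proc-style walk over every pid in a namespace:
 *
 *	struct pid *pid;
 *	int nr = 0;
 *
 *	rcu_read_lock();
 *	while ((pid = find_ge_pid(nr, ns)) != NULL) {
 *		nr = pid_nr_ns(pid, ns) + 1;	// resume past this pid
 *		...				// use pid under RCU
 *	}
 *	rcu_read_unlock();
 */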
void __init pid_idr_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	idr_init(&init_pid_ns.idr);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}