/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of the 1 million possible PIDs
 * are already allocated: a scan of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/proc_fs.h>
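
/*
 * pid_hashfn() below mixes the numeric pid with the owning namespace
 * pointer, so the same pid number used in different pid namespaces
 * hashes to different buckets of pid_hash.
 */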
#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;
#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
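
/*
 * Worked example, assuming 4 KiB pages and a 64-bit build: BITS_PER_PAGE is
 * 32768, so the default pid_max of 32768 (PID_MAX_DEFAULT) fits in a single
 * pidmap page, while the 4-million-PID ceiling (PID_MAX_LIMIT) needs 128
 * pages, which is where PIDMAP_ENTRIES comes from.
 */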
static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}
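
/*
 * Example: with 4 KiB pages (BITS_PER_PAGE == 32768), the pid described by
 * the second pidmap page (map index 1) at bit offset 5 is
 * 1 * 32768 + 5 = 32773.
 */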
#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
73 struct pid_namespace init_pid_ns
= {
75 .refcount
= ATOMIC_INIT(2),
78 [ 0 ... PIDMAP_ENTRIES
-1] = { ATOMIC_INIT(BITS_PER_PAGE
), NULL
}
82 .child_reaper
= &init_task
,
83 .user_ns
= &init_user_ns
,
84 .proc_inum
= PROC_PID_INIT_INO
,
86 EXPORT_SYMBOL_GPL(init_pid_ns
);
/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}
/*
 * If we started walking pids at 'base', is 'a' seen before 'b'?
 */
static int pid_before(int base, int a, int b)
{
	/*
	 * This is the same as saying
	 *
	 * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
	 * and that mapping orders 'a' and 'b' with respect to 'base'.
	 */
	return (unsigned)(a - base) < (unsigned)(b - base);
}
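
/*
 * Worked example: with base == 300, a == 5 (a pid that wrapped around) and
 * b == 400, (unsigned)(5 - 300) is a huge value while (unsigned)(400 - 300)
 * is 100, so pid_before() returns false: walking upward from 300, 400 is
 * reached before the wrapped-around 5.
 */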
/*
 * We might be racing with someone else trying to set pid_ns->last_pid
 * at the pid allocation time (there's also a sysctl for this, but racing
 * with this one is OK, see comment in kernel/pid_namespace.c about it).
 * We want the winner to have the "later" value, because if the
 * "earlier" value prevails, then a pid may get reused immediately.
 *
 * Since pids rollover, it is not sufficient to just pick the bigger
 * value. We have to consider where we started counting from.
 *
 * 'base' is the value of pid_ns->last_pid that we observed when
 * we started looking for a pid.
 *
 * 'pid' is the pid that we eventually found.
 */
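/*
 * Example of the race below: two tasks both observe base == 100 and
 * allocate pids 101 and 102. If the task that found 102 loses the initial
 * cmpxchg(), it reads back last_write == 101, which is still before 102
 * relative to base, so it retries and last_pid ends up as the later
 * value, 102.
 */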
static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
{
	int prev;
	int last_write = base;
	do {
		prev = last_write;
		last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
	} while ((prev != last_write) && (pid_before(base, last_write, pid)));
}
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	/*
	 * If last_pid points into the middle of the map->page we
	 * want to scan this bitmap block twice, the second time
	 * we start with offset == 0 (or RESERVED_PIDS).
	 */
	max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (!map->page) {
				map->page = page;
				page = NULL;
			}
			spin_unlock_irq(&pidmap_lock);
			kfree(page);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					set_last_pid(pid_ns, last, pid);
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			} while (offset < BITS_PER_PAGE && pid < pid_max);
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}
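
/*
 * Example of the max_scan computation in alloc_pidmap() above, assuming
 * 4 KiB pages and the default pid_max of 32768: DIV_ROUND_UP() yields one
 * pidmap page. If last_pid points into the middle of that page (offset
 * != 0), max_scan is 1 and the loop runs twice, so the page is scanned a
 * second time from RESERVED_PIDS after wrapping; if offset is 0, a single
 * pass suffices.
 */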
int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
{
	int offset;
	struct pidmap *map, *end;

	if (last >= PID_MAX_LIMIT)
		return -1;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}
void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
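	/*
	 * Fast path below: if we hold the only reference (count == 1),
	 * nobody else can take a new one, so the more expensive
	 * atomic_dec_and_test() can be skipped before freeing.
	 */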
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);
static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}
void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		hlist_del_rcu(&upid->pid_chain);
		switch(--ns->nr_hashed) {
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}
struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	pid->level = ns->level;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
		upid->ns->nr_hashed++;
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}
void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->nr_hashed &= ~PIDNS_HASH_ADDING;
	spin_unlock_irq(&pidmap_lock);
}
struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);
struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);
/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);
}
static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}
void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}
void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type, pid);
}
/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}
struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);
/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	rcu_lockdep_assert(rcu_read_lock_held(),
			   "find_task_by_pid_ns() needs rcu_read_lock()"
			   " protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}
struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}
struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);
struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);
struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);
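
/*
 * Example of pid_nr_ns() semantics below: a pid allocated in a level-2
 * namespace carries one upid per ancestor level (0, 1 and 2). Asking with
 * the level-1 ancestor namespace returns numbers[1].nr, while asking with
 * a deeper or unrelated namespace returns 0.
 */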
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);
pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID)
			task = task->group_leader;
		nr = pid_nr_ns(task->pids[type].pid, ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);
pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);
struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);
/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}
/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
	unsigned int i, pidhash_size;

	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
					   HASH_EARLY | HASH_SMALL,
					   &pidhash_shift, NULL,
					   0, 4096);
	pidhash_size = 1U << pidhash_shift;

	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}
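
/*
 * In other words, after pidhash_init() the table has 1 << pidhash_shift
 * buckets: pidhash_shift starts at 4 (16 buckets) and is raised by
 * alloc_large_system_hash() on larger machines, up to the 4096-entry
 * limit passed above (a shift of 12).
 */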
void __init pidmap_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
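	/*
	 * Worked example of the bump above, assuming the usual
	 * PIDS_PER_CPU_DEFAULT of 1024 and PIDS_PER_CPU_MIN of 8: on a
	 * 64-CPU machine pid_max becomes max(32768, 64 * 1024) = 65536
	 * (still capped at pid_max_max) and pid_max_min becomes
	 * max(301, 64 * 8) = 512.
	 */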
	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);
	init_pid_ns.nr_hashed = PIDNS_HASH_ADDING;

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}