/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. In the worst case,
 * when all but one of roughly 1 million possible PIDs are already allocated,
 * an allocation scans 32 list entries and at most PAGE_SIZE bytes (see the
 * worked numbers below). The typical fastpath is a single successful setbit.
 * Freeing is O(1).
 *
 * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 * Many thanks to Oleg Nesterov for comments and help
 */
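
/*
 * Illustrative arithmetic, assuming 4 KB pages: each pidmap page holds
 * PAGE_SIZE*8 = 32768 bits, so 32 bitmap pages cover 32 * 32768 = 1048576
 * (~1 million) PIDs.  A worst-case allocation therefore walks those 32
 * pidmap entries and scans at most one PAGE_SIZE bitmap for a free bit.
 */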

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
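
/*
 * Illustrative note: pid_hashfn() folds the pid number together with the
 * pid_namespace pointer before hashing, so the same numeric pid seen from
 * two different namespaces (e.g. nr == 1 in the init namespace and nr == 1
 * in a child namespace) normally lands in different buckets of the
 * 1 << pidhash_shift entry table.
 */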

static struct hlist_head *pid_hash;
static int pidhash_shift;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
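
/*
 * Illustrative arithmetic, assuming 4 KB pages (BITS_PER_PAGE == 32768):
 * a zero bit at offset 300 of the second pidmap entry, i.e.
 * map - pid_ns->pidmap == 1, gives mk_pid() == 1 * 32768 + 300 == 33068,
 * while find_next_offset() just scans that entry's bitmap for the next
 * zero bit starting at 'off'.
 */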

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
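
/*
 * Illustrative arithmetic, assuming 4 KB pages and a build where
 * PID_MAX_LIMIT is 4 * 1024 * 1024: the default pid_max of 32768 fits in a
 * single bitmap page, while the 4-million-PID limit needs
 * 4194304 / 32768 = 128 pages, allocated only as the corresponding PID
 * ranges are actually used.
 */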
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.child_reaper = &init_task,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

int is_container_init(struct task_struct *tsk)
{
	int ret = 0;
	struct pid *pid;

	rcu_read_lock();
	pid = task_pid(tsk);
	if (pid != NULL && pid->numbers[pid->level].nr == 1)
		ret = 1;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(is_container_init);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
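
/*
 * A minimal sketch of the interleaving described above (hypothetical CPUs):
 *
 *	CPU0					CPU1
 *	write_lock_irq(&tasklist_lock)
 *	  detach_pid() -> free_pid()		spin_lock(&pidmap_lock)
 *	  spins on pidmap_lock			<interrupt>
 *						  read_lock(&tasklist_lock)
 *						  spins on tasklist_lock
 *
 * Neither CPU can make progress unless pidmap_lock is always taken with
 * interrupts disabled.
 */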
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid)
{
	struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
	int offset = pid & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (map->page)
				kfree(page);
			else
				map->page = page;
			spin_unlock_irq(&pidmap_lock);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					pid_ns->last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			/*
			 * find_next_offset() found a bit, the pid from it
			 * is in-bounds, and if we fell back to the last
			 * bitmap block and the final block was the same
			 * as the starting point, pid is before last_pid.
			 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}

static int next_pidmap(struct pid_namespace *pid_ns, int last)
{
	int offset;
	struct pidmap *map, *end;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

fastcall void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

fastcall void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++)
		hlist_del_rcu(&pid->numbers[i].pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	get_pid_ns(ns);
	pid->level = ns->level;
	pid->nr = pid->numbers[0].nr;
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	for (i = ns->level; i >= 0; i--) {
		upid = &pid->numbers[i];
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	for (i++; i <= ns->level; i++)
		free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}

struct pid * fastcall find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct hlist_node *elem;
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr, elem,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
int fastcall attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);

	return 0;
}
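
/*
 * A minimal usage sketch under that locking rule (hypothetical caller,
 * not taken from this file):
 *
 *	write_lock_irq(&tasklist_lock);
 *	attach_pid(p, PIDTYPE_PID, pid);
 *	write_unlock_irq(&tasklist_lock);
 */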

void fastcall detach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = NULL;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
	old->pids[type].pid = NULL;
}

struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference(pid->tasks[type].first);
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type_ns(int type, int nr,
		struct pid_namespace *ns)
{
	return pid_task(find_pid_ns(nr, ns), type);
}

EXPORT_SYMBOL(find_task_by_pid_type_ns);
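
/*
 * A minimal usage sketch under that rule (hypothetical caller, not taken
 * from this file):
 *
 *	rcu_read_lock();
 *	task = find_task_by_pid_type_ns(PIDTYPE_PID, nr, ns);
 *	if (task)
 *		get_task_struct(task);
 *	rcu_read_unlock();
 */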

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();

	return pid;
}

struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;

	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();

	return result;
}

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);
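
/*
 * A minimal sketch of the iteration pattern this enables (hypothetical
 * caller, not taken from this file): under rcu_read_lock(), a /proc reader
 * can walk every live pid in a namespace with
 *
 *	for (pid = find_ge_pid(0, ns); pid; pid = find_ge_pid(nr + 1, ns))
 *		nr = pid_nr_ns(pid, ns);
 */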

struct pid_cache {
	int nr_ids;
	char name[16];
	struct kmem_cache *cachep;
	struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);

/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */
static struct kmem_cache *create_pid_cachep(int nr_ids)
{
	struct pid_cache *pcache;
	struct kmem_cache *cachep;

	mutex_lock(&pid_caches_mutex);
	list_for_each_entry(pcache, &pid_caches_lh, list)
		if (pcache->nr_ids == nr_ids)
			goto out;

	pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
	if (pcache == NULL)
		goto err_alloc;

	snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
	cachep = kmem_cache_create(pcache->name,
			sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (cachep == NULL)
		goto err_cachep;

	pcache->nr_ids = nr_ids;
	pcache->cachep = cachep;
	list_add(&pcache->list, &pid_caches_lh);
out:
	mutex_unlock(&pid_caches_mutex);
	return pcache->cachep;

err_cachep:
	kfree(pcache);
err_alloc:
	mutex_unlock(&pid_caches_mutex);
	return NULL;
}
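
/*
 * Illustrative sizing example: a pid visible in a level-2 namespace carries
 * nr_ids == 3 numerical ids, so its cache (named "pid_3" by the snprintf
 * above) allocates objects of
 * sizeof(struct pid) + 2 * sizeof(struct upid) bytes, since struct pid
 * already embeds one struct upid.
 */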

static struct pid_namespace *create_pid_namespace(int level)
{
	struct pid_namespace *ns;
	int i;

	ns = kmalloc(sizeof(struct pid_namespace), GFP_KERNEL);
	if (ns == NULL)
		goto out;

	ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ns->pidmap[0].page)
		goto out_free;

	ns->pid_cachep = create_pid_cachep(level + 1);
	if (ns->pid_cachep == NULL)
		goto out_free_map;

	kref_init(&ns->kref);
	ns->child_reaper = NULL;
	ns->level = level;

	set_bit(0, ns->pidmap[0].page);
	atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

	for (i = 1; i < PIDMAP_ENTRIES; i++) {
		ns->pidmap[i].page = NULL;
		atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);
	}

	return ns;

out_free_map:
	kfree(ns->pidmap[0].page);
out_free:
	kfree(ns);
out:
	return ERR_PTR(-ENOMEM);
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
	int i;

	for (i = 0; i < PIDMAP_ENTRIES; i++)
		kfree(ns->pidmap[i].page);
	kfree(ns);
}

struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
{
	struct pid_namespace *new_ns;

	new_ns = get_pid_ns(old_ns);
	if (!(flags & CLONE_NEWPID))
		goto out;

	new_ns = ERR_PTR(-EINVAL);
	if (flags & CLONE_THREAD)
		goto out_put;

	new_ns = create_pid_namespace(old_ns->level + 1);
	if (!IS_ERR(new_ns))
		new_ns->parent = get_pid_ns(old_ns);

out_put:
	put_pid_ns(old_ns);
out:
	return new_ns;
}

void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns, *parent;

	ns = container_of(kref, struct pid_namespace, kref);

	parent = ns->parent;
	destroy_pid_namespace(ns);

	if (parent != NULL)
		put_pid_ns(parent);
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
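
/*
 * Illustrative sizing, assuming a machine where nr_kernel_pages amounts to
 * 256 MB: megabytes == 256, fls(256 * 4) == fls(1024) == 11, so
 * pidhash_shift becomes min(12, max(4, 11)) == 11 and the table gets
 * 1 << 11 == 2048 hash buckets.  At 1 GB and above the shift is clamped to
 * 12, i.e. 4096 buckets, matching the comment above.
 */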

void __init pidhash_init(void)
{
	int i, pidhash_size;
	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;

	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct hlist_head));

	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = create_pid_cachep(1);
	if (init_pid_ns.pid_cachep == NULL)
		panic("Can't create pid_1 cachep\n");
}