/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of 1 million possible PIDs are
 * already allocated, costs a scan of 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful setbit.
 * Freeing is O(1).
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>

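/*
 * pid_hash[] is the global hash table mapping PID numbers to struct pid.
 * Buckets are hlist heads chained through pid->pid_chain; the table is
 * sized at boot by pidhash_init() and indexed via pid_hashfn() below.
 */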
#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)

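/*
 * mk_pid() converts a (bitmap page, bit offset) pair back into a PID
 * number. As an illustration, on an architecture with 4 KiB pages
 * BITS_PER_PAGE is 32768, so bit 5 of the second pidmap page
 * corresponds to PID 1*32768 + 5 = 32773.
 */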
static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.level = 0,
	.child_reaper = &init_task,
};

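/*
 * is_global_init() reports whether @tsk is the init process, i.e. the
 * child reaper of the initial PID namespace.
 */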
int is_global_init(struct task_struct *tsk)
{
	return tsk == init_pid_ns.child_reaper;
}

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

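/*
 * free_pidmap() releases a PID number back to the bitmap: clear the bit
 * and bump the per-page free count. It is called from free_pid() once
 * the struct pid has been unhashed.
 */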
static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid)
{
	struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
	int offset = pid & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

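/*
 * alloc_pidmap() scans the bitmap pages for a free PID number, starting
 * just above last_pid and wrapping around to RESERVED_PIDS once pid_max
 * is reached. Bitmap pages are allocated lazily, and the bit itself is
 * claimed with test_and_set_bit(), so the fast path takes no lock.
 * Returns the new PID, or -1 if the PID space is exhausted.
 */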
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (map->page)
				kfree(page);
			else
				map->page = page;
			spin_unlock_irq(&pidmap_lock);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					pid_ns->last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			/*
			 * find_next_offset() found a bit, the pid from it
			 * is in-bounds, and if we fell back to the last
			 * bitmap block and the final block was the same
			 * as the starting point, pid is before last_pid.
			 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}

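/*
 * next_pidmap() returns the first allocated PID number greater than
 * @last, or -1 if there is none; find_ge_pid() below uses it to walk
 * the PID space for /proc.
 */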
static int next_pidmap(struct pid_namespace *pid_ns, int last)
{
	int offset;
	struct pidmap *map, *end;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

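/*
 * put_pid() drops one reference to a struct pid and frees it back to the
 * slab cache when the last reference goes away; the count == 1 check
 * merely skips the atomic decrement when we are already the only holder.
 */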
fastcall void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	/* FIXME - this must be the namespace this pid lives in */
	ns = &init_pid_ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count))
		kmem_cache_free(ns->pid_cachep, pid);
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

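/*
 * free_pid() unhashes the struct pid under pidmap_lock (with interrupts
 * disabled, see the comment above pidmap_lock), returns its number to
 * the bitmap and defers the final put_pid() until after an RCU grace
 * period, so concurrent lockless find_pid() lookups remain safe.
 */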
fastcall void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	hlist_del_rcu(&pid->pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	free_pidmap(&init_pid_ns, pid->nr);
	call_rcu(&pid->rcu, delayed_put_pid);
}

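/*
 * alloc_pid() allocates a struct pid from the namespace's slab cache,
 * reserves a free number from the bitmap, initialises the per-type task
 * lists and hashes the new pid. Returns NULL on failure.
 */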
struct pid *alloc_pid(void)
{
	struct pid *pid;
	enum pid_type type;
	int nr = -1;
	struct pid_namespace *ns;

	ns = task_active_pid_ns(current);
	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	nr = alloc_pidmap(ns);
	if (nr < 0)
		goto out_free;

	atomic_set(&pid->count, 1);
	pid->nr = nr;
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr)]);
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}

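/*
 * find_pid() looks up a struct pid by number in the hash table. The
 * walk uses the RCU list primitives, so callers are expected to be in
 * an RCU read-side critical section (see find_get_pid() below) or to
 * hold the tasklist_lock.
 */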
struct pid * fastcall find_pid(int nr)
{
	struct hlist_node *elem;
	struct pid *pid;

	hlist_for_each_entry_rcu(pid, elem,
			&pid_hash[pid_hashfn(nr)], pid_chain) {
		if (pid->nr == nr)
			return pid;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
int fastcall attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);

	return 0;
}

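/*
 * detach_pid() removes @task from the pid's list for @type and, if no
 * task of any type is attached any more, frees the pid itself. Like
 * attach_pid(), it is called with the tasklist_lock write-held.
 */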
void fastcall detach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = NULL;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
	old->pids[type].pid = NULL;
}

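/*
 * pid_task() returns the first task attached to @pid for the given type,
 * or NULL. It uses rcu_dereference(), so it may be called locklessly
 * under rcu_read_lock().
 */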
struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference(pid->tasks[type].first);
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type(int type, int nr)
{
	return pid_task(find_pid(nr), type);
}

EXPORT_SYMBOL(find_task_by_pid_type);

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}

struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}

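/*
 * find_get_pid() combines find_pid() with get_pid() under rcu_read_lock()
 * and hands back a counted reference. Illustrative caller-side usage
 * (not from this file):
 *
 *	struct pid *p = find_get_pid(nr);
 *	if (p) {
 *		...
 *		put_pid(p);
 *	}
 */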
struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_pid(nr));
	rcu_read_unlock();

	return pid;
}

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid.
 */
struct pid *find_ge_pid(int nr)
{
	struct pid *pid;

	do {
		pid = find_pid(nr);
		if (pid)
			break;
		nr = next_pidmap(task_active_pid_ns(current), nr);
	} while (nr > 0);

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

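/*
 * struct pid_cache describes one slab cache of struct pid, keyed by the
 * number of numerical ids a pid carries. pidmap_init() below creates the
 * single "pid_1" cache used by the initial namespace.
 */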
struct pid_cache {
	int nr_ids;
	char name[16];
	struct kmem_cache *cachep;
	struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);

/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */

static struct kmem_cache *create_pid_cachep(int nr_ids)
{
	struct pid_cache *pcache;
	struct kmem_cache *cachep;

	mutex_lock(&pid_caches_mutex);
	list_for_each_entry (pcache, &pid_caches_lh, list)
		if (pcache->nr_ids == nr_ids)
			goto out;

	pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
	if (pcache == NULL)
		goto err_alloc;

	snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
	cachep = kmem_cache_create(pcache->name,
			/* FIXME add numerical ids here */
			sizeof(struct pid), 0, SLAB_HWCACHE_ALIGN, NULL);
	if (cachep == NULL)
		goto err_cachep;

	pcache->nr_ids = nr_ids;
	pcache->cachep = cachep;
	list_add(&pcache->list, &pid_caches_lh);
out:
	mutex_unlock(&pid_caches_mutex);
	return pcache->cachep;

err_cachep:
	kfree(pcache);
err_alloc:
	mutex_unlock(&pid_caches_mutex);
	return NULL;
}

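/*
 * copy_pid_ns() does not create a new namespace yet; it only takes a
 * reference on the parent's namespace and returns it.
 */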
struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
{
	BUG_ON(!old_ns);
	get_pid_ns(old_ns);
	return old_ns;
}

void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns;

	ns = container_of(kref, struct pid_namespace, kref);
	kfree(ns);
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
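/*
 * Worked example (illustrative): with nr_kernel_pages covering roughly
 * 128 MB, megabytes = 128, fls(128 * 4) = 10, so the table gets
 * 1 << 10 = 1024 buckets.
 */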
void __init pidhash_init(void)
{
	int i, pidhash_size;
	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;

	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct hlist_head));

	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

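/*
 * pidmap_init() allocates the first bitmap page, marks PID 0 as
 * permanently reserved and creates the slab cache that alloc_pid()
 * draws struct pid objects from.
 */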
void __init pidmap_init(void)
{
	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = create_pid_cachep(1);
	if (init_pid_ns.pid_cachep == NULL)
		panic("Can't create pid_1 cachep\n");
}