/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one out of 1 million possible PIDs
 * are already allocated, requires scanning 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful setbit.
 * Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 */
28 | ||
29 | #include <linux/mm.h> | |
30 | #include <linux/module.h> | |
31 | #include <linux/slab.h> | |
32 | #include <linux/init.h> | |
33 | #include <linux/bootmem.h> | |
34 | #include <linux/hash.h> | |
35 | #include <linux/pid_namespace.h> | |
36 | #include <linux/init_task.h> | |
37 | #include <linux/syscalls.h> | |
38 | ||
#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}

#define find_next_offset(map, off)	\
	find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, yet
 * the scheme still scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.level = 0,
	.child_reaper = &init_task,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

int is_container_init(struct task_struct *tsk)
{
	int ret = 0;
	struct pid *pid;

	rcu_read_lock();
	pid = task_pid(tsk);
	if (pid != NULL && pid->numbers[pid->level].nr == 1)
		ret = 1;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(is_container_init);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (map->page)
				kfree(page);
			else
				map->page = page;
			spin_unlock_irq(&pidmap_lock);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					pid_ns->last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			/*
			 * find_next_offset() found a bit, the pid from it
			 * is in-bounds, and if we fell back to the last
			 * bitmap block and the final block was the same
			 * as the starting point, pid is before last_pid.
			 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}

int next_pidmap(struct pid_namespace *pid_ns, int last)
{
	int offset;
	struct pidmap *map, *end;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++)
		hlist_del_rcu(&pid->numbers[i].pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	get_pid_ns(ns);
	pid->level = ns->level;
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	for (i = ns->level; i >= 0; i--) {
		upid = &pid->numbers[i];
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct hlist_node *elem;
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr, elem,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(find_vpid);

struct pid *find_pid(int nr)
{
	return find_pid_ns(nr, &init_pid_ns);
}
EXPORT_SYMBOL_GPL(find_pid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
int attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);

	return 0;
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = NULL;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference(pid->tasks[type].first);
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type_ns(int type, int nr,
		struct pid_namespace *ns)
{
	return pid_task(find_pid_ns(nr, ns), type);
}
EXPORT_SYMBOL(find_task_by_pid_type_ns);

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_type_ns(PIDTYPE_PID, vnr,
			current->nsproxy->pid_ns);
}
EXPORT_SYMBOL(find_task_by_vpid);

struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	return find_task_by_pid_type_ns(PIDTYPE_PID, nr, ns);
}
EXPORT_SYMBOL(find_task_by_pid_ns);

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_pid(tsk), ns);
}
EXPORT_SYMBOL(task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_pgrp(tsk), ns);
}
EXPORT_SYMBOL(task_pgrp_nr_ns);

pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_session(tsk), ns);
}
EXPORT_SYMBOL(task_session_nr_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
	int i, pidhash_size;
	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;

	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct hlist_head));

	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}