/*
 * Generic, scalable PID allocator and per-namespace PID lookup
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from storing them in a
 * per-namespace IDR and parking tasks using a given ID on a list.
 *
 * PIDs are allocated out of that IDR: idr_alloc_cyclic() hands out the
 * next free ID and wraps back to RESERVED_PIDS once pid_max is reached.
 * Allocation and freeing are serialized by pidmap_lock; lookups through
 * find_pid_ns() are lock-free under RCU.
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/proc_fs.h>
#include <linux/sched/task.h>
#include <linux/idr.h>

struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

/*
 * Each namespace's IDR starts out empty and grows on first use, so a
 * low pid_max value does not cost memory up front, while the scheme
 * still scales to up to 4 million PIDs (PID_MAX_LIMIT) at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = KREF_INIT(2),
	.idr = IDR_INIT,
	.pid_allocated = PIDNS_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
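/*
 * Illustrative interleaving of that deadlock (a sketch, not in the
 * original file):
 *
 *	CPU 0				CPU 1
 *	-----				-----
 *	spin_lock(&pidmap_lock)
 *					write_lock_irq(&tasklist_lock)
 *					detach_pid() -> free_pid()
 *					  spin_lock(&pidmap_lock)  <- spins
 *	<interrupt>
 *	  read_lock(&tasklist_lock)	   <- spins behind the writer
 *
 * Neither CPU can make progress, which is why every pidmap_lock
 * acquisition in this file disables interrupts.
 */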

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);
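/*
 * Usage sketch (illustrative, not in the original file): put_pid() pairs
 * with get_pid() from <linux/pid.h>. A caller that wants to keep a
 * struct pid beyond the current RCU section takes a reference first:
 *
 *	rcu_read_lock();
 *	pid = get_pid(task_pid(task));
 *	rcu_read_unlock();
 *
 *	... use pid; the owning pid_namespace stays pinned with it ...
 *	put_pid(pid);
 */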

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		switch (--ns->pid_allocated) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->pid_allocated = 0;
			/* fall through */
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}

		idr_remove(&ns->idr, upid->nr);
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;

	for (i = ns->level; i >= 0; i--) {
		int pid_min = 1;

		idr_preload(GFP_KERNEL);
		spin_lock_irq(&pidmap_lock);

		/*
		 * init really needs pid 1, but after reaching the maximum
		 * wrap back to RESERVED_PIDS
		 */
		if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
			pid_min = RESERVED_PIDS;

		/*
		 * Store a null pointer so find_pid_ns does not find
		 * a partially initialized PID (see below).
		 */
		nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
				      pid_max, GFP_ATOMIC);
		spin_unlock_irq(&pidmap_lock);
		idr_preload_end();

		if (nr < 0) {
			retval = nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns)) {
			disable_pid_allocation(ns);
			goto out_free;
		}
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->pid_allocated & PIDNS_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		/* Make the PID visible to find_pid_ns. */
		idr_replace(&upid->ns->idr, pid, upid->nr);
		upid->ns->pid_allocated++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	spin_lock_irq(&pidmap_lock);
	while (++i <= ns->level)
		idr_remove(&ns->idr, (pid->numbers + i)->nr);

	spin_unlock_irq(&pidmap_lock);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}
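/*
 * Caller-side sketch (illustrative, not in the original file):
 * copy_process()-style callers allocate in the child's namespace and
 * must treat the result as an ERR_PTR:
 *
 *	struct pid *pid = alloc_pid(ns);
 *
 *	if (IS_ERR(pid))
 *		return PTR_ERR(pid);
 *
 * On a later failure path, before the pid has been attached to a task,
 * it is released again with free_pid(pid).
 */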

void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->pid_allocated &= ~PIDNS_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);
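/*
 * Lookup sketch (illustrative, not in the original file): both helpers
 * return an unreferenced pointer, so callers must stay inside an RCU
 * read-side section, or take a reference before leaving it:
 *
 *	rcu_read_lock();
 *	pid = get_pid(find_vpid(nr));
 *	rcu_read_unlock();
 *
 * which is exactly the pattern find_get_pid() below wraps up.
 */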

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link = &task->pids[type];
	hlist_add_head_rcu(&link->node, &link->pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(rcu_dereference(task->pids[type].pid));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);
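/*
 * Usage sketch (illustrative, not in the original file): combined with
 * pid_vnr() below, this yields a task's ID as seen from the caller's
 * namespace while holding the struct pid across the conversion:
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);
 *	pid_t vnr = pid_vnr(pid);
 *
 *	put_pid(pid);
 */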

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);
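/*
 * Example (illustrative values, not in the original file): a task living
 * two namespace levels deep carries one upid per ancestor level, e.g.
 *
 *	pid->numbers[0] = { .nr = 4711, .ns = &init_pid_ns }
 *	pid->numbers[1] = { .nr =   42, .ns = container_ns }
 *	pid->numbers[2] = { .nr =    7, .ns = nested_ns    }
 *
 * pid_nr_ns(pid, container_ns) returns 42; asking from a namespace that
 * is not an ancestor fails the upid->ns check and returns 0.
 */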

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID) {
			if (type == __PIDTYPE_TGID)
				type = PIDTYPE_PID;

			task = task->group_leader;
		}
		nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);
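/*
 * Caller sketch (illustrative; the real wrappers live in
 * <linux/sched.h>): the task_*_nr_ns() family funnels into this helper,
 * along the lines of
 *
 *	static inline pid_t task_tgid_vnr(struct task_struct *tsk)
 *	{
 *		return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
 *	}
 *
 * __PIDTYPE_TGID is folded back to PIDTYPE_PID on the group leader, so
 * a thread's TGID is its leader's PID in the requested namespace.
 */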

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	return idr_get_next(&ns->idr, &nr);
}
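/*
 * Iteration sketch (illustrative, not in the original file): proc walks
 * every pid in a namespace by bumping nr past each hit, roughly
 *
 *	struct pid *pid;
 *	int nr = 0;
 *
 *	rcu_read_lock();
 *	while ((pid = find_ge_pid(nr, ns)) != NULL) {
 *		nr = pid_nr_ns(pid, ns) + 1;
 *		... visit pid ...
 *	}
 *	rcu_read_unlock();
 */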

void __init pid_idr_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	idr_init(&init_pid_ns.idr);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}