/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>
#include <linux/proc_ns.h>
#include <linux/reboot.h>
#include <linux/export.h>

struct pid_cache {
	int nr_ids;
	char name[16];
	struct kmem_cache *cachep;
	struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;

/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */
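/*
 * Note: 'struct pid' already embeds one 'struct upid' in its numbers[]
 * array, which is why the object size below only adds (nr_ids - 1) extra
 * entries, one per additional namespace level the pid is visible in.
 */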
37 | ||
38 | static struct kmem_cache *create_pid_cachep(int nr_ids) | |
39 | { | |
40 | struct pid_cache *pcache; | |
41 | struct kmem_cache *cachep; | |
42 | ||
43 | mutex_lock(&pid_caches_mutex); | |
44 | list_for_each_entry(pcache, &pid_caches_lh, list) | |
45 | if (pcache->nr_ids == nr_ids) | |
46 | goto out; | |
47 | ||
48 | pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL); | |
49 | if (pcache == NULL) | |
50 | goto err_alloc; | |
51 | ||
52 | snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids); | |
53 | cachep = kmem_cache_create(pcache->name, | |
54 | sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid), | |
55 | 0, SLAB_HWCACHE_ALIGN, NULL); | |
56 | if (cachep == NULL) | |
57 | goto err_cachep; | |
58 | ||
59 | pcache->nr_ids = nr_ids; | |
60 | pcache->cachep = cachep; | |
61 | list_add(&pcache->list, &pid_caches_lh); | |
62 | out: | |
63 | mutex_unlock(&pid_caches_mutex); | |
64 | return pcache->cachep; | |
65 | ||
66 | err_cachep: | |
67 | kfree(pcache); | |
68 | err_alloc: | |
69 | mutex_unlock(&pid_caches_mutex); | |
70 | return NULL; | |
71 | } | |
72 | ||
static void proc_cleanup_work(struct work_struct *work)
{
	struct pid_namespace *ns = container_of(work, struct pid_namespace, proc_work);
	pid_ns_release_proc(ns);
}

/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
#define MAX_PID_NS_LEVEL 32
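
/*
 * Every 'struct pid' carries one 'struct upid' per namespace level (see
 * create_pid_cachep() above), so capping the nesting depth here also caps
 * how large a single pid allocation can grow.
 */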
81 | ||
82 | static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns, | |
83 | struct pid_namespace *parent_pid_ns) | |
84 | { | |
85 | struct pid_namespace *ns; | |
86 | unsigned int level = parent_pid_ns->level + 1; | |
87 | int i; | |
88 | int err; | |
89 | ||
90 | if (level > MAX_PID_NS_LEVEL) { | |
91 | err = -EINVAL; | |
92 | goto out; | |
93 | } | |
94 | ||
95 | err = -ENOMEM; | |
96 | ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL); | |
97 | if (ns == NULL) | |
98 | goto out; | |
99 | ||
100 | ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL); | |
101 | if (!ns->pidmap[0].page) | |
102 | goto out_free; | |
103 | ||
104 | ns->pid_cachep = create_pid_cachep(level + 1); | |
105 | if (ns->pid_cachep == NULL) | |
106 | goto out_free_map; | |
107 | ||
108 | err = ns_alloc_inum(&ns->ns); | |
109 | if (err) | |
110 | goto out_free_map; | |
111 | ns->ns.ops = &pidns_operations; | |
112 | ||
113 | kref_init(&ns->kref); | |
114 | ns->level = level; | |
115 | ns->parent = get_pid_ns(parent_pid_ns); | |
116 | ns->user_ns = get_user_ns(user_ns); | |
117 | ns->nr_hashed = PIDNS_HASH_ADDING; | |
118 | INIT_WORK(&ns->proc_work, proc_cleanup_work); | |
119 | ||
120 | set_bit(0, ns->pidmap[0].page); | |
121 | atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1); | |
122 | ||
123 | for (i = 1; i < PIDMAP_ENTRIES; i++) | |
124 | atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE); | |
125 | ||
126 | return ns; | |
127 | ||
128 | out_free_map: | |
129 | kfree(ns->pidmap[0].page); | |
130 | out_free: | |
131 | kmem_cache_free(pid_ns_cachep, ns); | |
132 | out: | |
133 | return ERR_PTR(err); | |
134 | } | |
135 | ||
static void delayed_free_pidns(struct rcu_head *p)
{
	kmem_cache_free(pid_ns_cachep,
			container_of(p, struct pid_namespace, rcu));
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
	int i;

	ns_free_inum(&ns->ns);
	for (i = 0; i < PIDMAP_ENTRIES; i++)
		kfree(ns->pidmap[i].page);
	put_user_ns(ns->user_ns);
	call_rcu(&ns->rcu, delayed_free_pidns);
}

struct pid_namespace *copy_pid_ns(unsigned long flags,
	struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
	if (!(flags & CLONE_NEWPID))
		return get_pid_ns(old_ns);
	if (task_active_pid_ns(current) != old_ns)
		return ERR_PTR(-EINVAL);
	return create_pid_namespace(user_ns, old_ns);
}

static void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns;

	ns = container_of(kref, struct pid_namespace, kref);
	destroy_pid_namespace(ns);
}

void put_pid_ns(struct pid_namespace *ns)
{
	struct pid_namespace *parent;

	while (ns != &init_pid_ns) {
		parent = ns->parent;
		if (!kref_put(&ns->kref, free_pid_ns))
			break;
		ns = parent;
	}
}
EXPORT_SYMBOL_GPL(put_pid_ns);

void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task, *me = current;
	int init_pids = thread_group_leader(me) ? 1 : 2;

	/* Don't allow any more processes into the pid namespace */
	disable_pid_allocation(pid_ns);

	/*
	 * Ignore SIGCHLD so that any terminated children are autoreaped.
	 * This speeds up the namespace shutdown, plus see the comment
	 * below.
	 */
	spin_lock_irq(&me->sighand->siglock);
	me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
	spin_unlock_irq(&me->sighand->siglock);

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find the remaining pids in the namespace, signal them and wait
	 * for them to exit.
	 *
	 * Note: this signals each thread in the namespace - even those that
	 *       belong to the same thread group.  To avoid that, we would
	 *       have to walk the entire tasklist looking for processes in
	 *       this namespace, which could be unnecessarily expensive if
	 *       the pid namespace has just a few processes.  Alternatively,
	 *       we would need to maintain a tasklist for each pid namespace.
	 */
	read_lock(&tasklist_lock);
	nr = next_pidmap(pid_ns, 1);
	while (nr > 0) {
		rcu_read_lock();

		task = pid_task(find_vpid(nr), PIDTYPE_PID);
		if (task && !__fatal_signal_pending(task))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, task);

		rcu_read_unlock();

		nr = next_pidmap(pid_ns, nr);
	}
	read_unlock(&tasklist_lock);

	/*
	 * Reap the EXIT_ZOMBIE children we had before we ignored SIGCHLD.
	 * sys_wait4() will also block until our children traced from the
	 * parent namespace are detached and become EXIT_DEAD.
	 */
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = sys_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	/*
	 * sys_wait4() above can't reap the EXIT_DEAD children, but we do not
	 * really care: we could reparent them to the global init.  We could
	 * also exit and reap ->child_reaper even if it is not the last thread
	 * in this pid_ns: free_pid(nr_hashed == 0) calls proc_cleanup_work(),
	 * and pid_ns can not go away until proc_kill_sb() drops the reference.
	 *
	 * But this ns can also have other tasks injected by setns()+fork().
	 * Again, ignoring the user visible semantics we do not really need
	 * to wait until they are all reaped, but they can be reparented to
	 * us and thus we need to ensure that pid_ns->child_reaper stays
	 * valid until they all go away.  See free_pid()->wake_up_process().
	 *
	 * We rely on SIGCHLD being ignored: an injected zombie must be
	 * autoreaped if it is reparented to us.
	 */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (pid_ns->nr_hashed == init_pids)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (pid_ns->reboot)
		current->signal->group_exit_code = pid_ns->reboot;

	acct_exit_ns(pid_ns);
	return;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	struct ctl_table tmp = *table;

	if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Writing directly to the ns' last_pid field is OK, since this
	 * field is volatile in a living namespace anyway and any code
	 * writing to it should synchronize its usage by external means.
	 */

	tmp.data = &pid_ns->last_pid;
	return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
}
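
/*
 * Illustrative userspace usage (not part of this file): checkpoint/restore
 * tools pre-set the pid that will be handed out next by writing to this
 * sysctl, e.g.
 *
 *	echo 9999 > /proc/sys/kernel/ns_last_pid
 *
 * after which the next successful fork() in the writer's active pid
 * namespace returns pid 10000, provided that pid is still free.
 */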
291 | ||
292 | extern int pid_max; | |
293 | static int zero = 0; | |
294 | static struct ctl_table pid_ns_ctl_table[] = { | |
295 | { | |
296 | .procname = "ns_last_pid", | |
297 | .maxlen = sizeof(int), | |
298 | .mode = 0666, /* permissions are checked in the handler */ | |
299 | .proc_handler = pid_ns_ctl_handler, | |
300 | .extra1 = &zero, | |
301 | .extra2 = &pid_max, | |
302 | }, | |
303 | { } | |
304 | }; | |
305 | static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } }; | |
306 | #endif /* CONFIG_CHECKPOINT_RESTORE */ | |
307 | ||
int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
	if (pid_ns == &init_pid_ns)
		return 0;

	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART2:
	case LINUX_REBOOT_CMD_RESTART:
		pid_ns->reboot = SIGHUP;
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
	case LINUX_REBOOT_CMD_HALT:
		pid_ns->reboot = SIGINT;
		break;
	default:
		return -EINVAL;
	}

	read_lock(&tasklist_lock);
	force_sig(SIGKILL, pid_ns->child_reaper);
	read_unlock(&tasklist_lock);

	do_exit(0);

	/* Not reached */
	return 0;
}

static inline struct pid_namespace *to_pid_ns(struct ns_common *ns)
{
	return container_of(ns, struct pid_namespace, ns);
}

static struct ns_common *pidns_get(struct task_struct *task)
{
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(task);
	if (ns)
		get_pid_ns(ns);
	rcu_read_unlock();

	return ns ? &ns->ns : NULL;
}

static void pidns_put(struct ns_common *ns)
{
	put_pid_ns(to_pid_ns(ns));
}

static int pidns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *ancestor, *new = to_pid_ns(ns);

	if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Only allow entering the current active pid namespace
	 * or a child of the current active pid namespace.
	 *
	 * This is required for fork to return a usable pid value and
	 * it maintains the property that processes and their
	 * children cannot escape their current pid namespace.
	 */
	if (new->level < active->level)
		return -EINVAL;

	ancestor = new;
	while (ancestor->level > active->level)
		ancestor = ancestor->parent;
	if (ancestor != active)
		return -EINVAL;

	put_pid_ns(nsproxy->pid_ns_for_children);
	nsproxy->pid_ns_for_children = get_pid_ns(new);
	return 0;
}

const struct proc_ns_operations pidns_operations = {
	.name = "pid",
	.type = CLONE_NEWPID,
	.get = pidns_get,
	.put = pidns_put,
	.install = pidns_install,
};

static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);

#ifdef CONFIG_CHECKPOINT_RESTORE
	register_sysctl_paths(kern_path, pid_ns_ctl_table);
#endif
	return 0;
}

__initcall(pid_namespaces_init);