/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */

#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/highuid.h>
#include <linux/cred.h>
#include <linux/securebits.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/projid.h>
#include <linux/fs_struct.h>

extern bool setup_userns_sysctls(struct user_namespace *ns);
extern void retire_userns_sysctls(struct user_namespace *ns);

static struct kmem_cache *user_ns_cachep __read_mostly;
static DEFINE_MUTEX(userns_state_mutex);

static bool new_idmap_permitted(const struct file *file,
                                struct user_namespace *ns, int cap_setid,
                                struct uid_gid_map *map);
static void free_user_ns(struct work_struct *work);

static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
{
        /* Start with the same capabilities as init but useless for doing
         * anything as the capabilities are bound to the new user namespace.
         */
        cred->securebits = SECUREBITS_DEFAULT;
        cred->cap_inheritable = CAP_EMPTY_SET;
        cred->cap_permitted = CAP_FULL_SET;
        cred->cap_effective = CAP_FULL_SET;
        cred->cap_ambient = CAP_EMPTY_SET;
        cred->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
        key_put(cred->request_key_auth);
        cred->request_key_auth = NULL;
#endif
        /* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
        cred->user_ns = user_ns;
}

/*
 * Create a new user namespace, deriving the creator from the user in the
 * passed credentials, and replacing that user with the new root user for the
 * new namespace.
 *
 * This is called by copy_creds(), which will finish setting the target task's
 * credentials.
 */
int create_user_ns(struct cred *new)
{
        struct user_namespace *ns, *parent_ns = new->user_ns;
        kuid_t owner = new->euid;
        kgid_t group = new->egid;
        int ret;

        if (parent_ns->level > 32)
                return -EUSERS;

        /*
         * Verify that we can not violate the policy of which files
         * may be accessed that is specified by the root directory,
         * by verifying that the root directory is at the root of the
         * mount namespace which allows all files to be accessed.
         */
        if (current_chrooted())
                return -EPERM;

        /* The creator needs a mapping in the parent user namespace
         * or else we won't be able to reasonably tell userspace who
         * created a user_namespace.
         */
        if (!kuid_has_mapping(parent_ns, owner) ||
            !kgid_has_mapping(parent_ns, group))
                return -EPERM;

        ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
        if (!ns)
                return -ENOMEM;

        ret = ns_alloc_inum(&ns->ns);
        if (ret) {
                kmem_cache_free(user_ns_cachep, ns);
                return ret;
        }
        ns->ns.ops = &userns_operations;

        atomic_set(&ns->count, 1);
        /* Leave the new->user_ns reference with the new user namespace. */
        ns->parent = parent_ns;
        ns->level = parent_ns->level + 1;
        ns->owner = owner;
        ns->group = group;
        INIT_WORK(&ns->work, free_user_ns);

        /* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
        mutex_lock(&userns_state_mutex);
        ns->flags = parent_ns->flags;
        mutex_unlock(&userns_state_mutex);

#ifdef CONFIG_PERSISTENT_KEYRINGS
        init_rwsem(&ns->persistent_keyring_register_sem);
#endif
        ret = -ENOMEM;
        if (!setup_userns_sysctls(ns))
                goto fail_keyring;

        set_cred_user_ns(new, ns);
        return 0;
fail_keyring:
#ifdef CONFIG_PERSISTENT_KEYRINGS
        key_put(ns->persistent_keyring_register);
#endif
        ns_free_inum(&ns->ns);
        kmem_cache_free(user_ns_cachep, ns);
        return ret;
}

int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
{
        struct cred *cred;
        int err = -ENOMEM;

        if (!(unshare_flags & CLONE_NEWUSER))
                return 0;

        cred = prepare_creds();
        if (cred) {
                err = create_user_ns(cred);
                if (err)
                        put_cred(cred);
                else
                        *new_cred = cred;
        }

        return err;
}

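/*
 * Context, as a rough sketch (illustrative, not part of this file): both
 * entry points above are reached from userspace via CLONE_NEWUSER, e.g.
 *
 *      unshare(CLONE_NEWUSER);
 *      clone(child_fn, stack, CLONE_NEWUSER | SIGCHLD, NULL);
 *
 * unshare(2) goes through sys_unshare() -> unshare_userns(), while
 * clone(2) goes through copy_creds() -> create_user_ns().  In both cases
 * the task ends up with a full capability set that is only meaningful
 * inside the new, still unmapped, namespace.
 */
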
static void free_user_ns(struct work_struct *work)
{
        struct user_namespace *parent, *ns =
                container_of(work, struct user_namespace, work);

        do {
                parent = ns->parent;
                retire_userns_sysctls(ns);
#ifdef CONFIG_PERSISTENT_KEYRINGS
                key_put(ns->persistent_keyring_register);
#endif
                ns_free_inum(&ns->ns);
                kmem_cache_free(user_ns_cachep, ns);
                ns = parent;
        } while (atomic_dec_and_test(&parent->count));
}

void __put_user_ns(struct user_namespace *ns)
{
        schedule_work(&ns->work);
}
EXPORT_SYMBOL(__put_user_ns);

static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
{
        unsigned idx, extents;
        u32 first, last, id2;

        id2 = id + count - 1;

        /* Find the matching extent */
        extents = map->nr_extents;
        smp_rmb();
        for (idx = 0; idx < extents; idx++) {
                first = map->extent[idx].first;
                last = first + map->extent[idx].count - 1;
                if (id >= first && id <= last &&
                    (id2 >= first && id2 <= last))
                        break;
        }
        /* Map the id or note failure */
        if (idx < extents)
                id = (id - first) + map->extent[idx].lower_first;
        else
                id = (u32) -1;

        return id;
}

static u32 map_id_down(struct uid_gid_map *map, u32 id)
{
        unsigned idx, extents;
        u32 first, last;

        /* Find the matching extent */
        extents = map->nr_extents;
        smp_rmb();
        for (idx = 0; idx < extents; idx++) {
                first = map->extent[idx].first;
                last = first + map->extent[idx].count - 1;
                if (id >= first && id <= last)
                        break;
        }
        /* Map the id or note failure */
        if (idx < extents)
                id = (id - first) + map->extent[idx].lower_first;
        else
                id = (u32) -1;

        return id;
}

static u32 map_id_up(struct uid_gid_map *map, u32 id)
{
        unsigned idx, extents;
        u32 first, last;

        /* Find the matching extent */
        extents = map->nr_extents;
        smp_rmb();
        for (idx = 0; idx < extents; idx++) {
                first = map->extent[idx].lower_first;
                last = first + map->extent[idx].count - 1;
                if (id >= first && id <= last)
                        break;
        }
        /* Map the id or note failure */
        if (idx < extents)
                id = (id - first) + map->extent[idx].first;
        else
                id = (u32) -1;

        return id;
}

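/*
 * Worked example (illustrative values): with a single extent
 * { .first = 0, .lower_first = 100000, .count = 65536 } installed in
 * ns->uid_map,
 *
 *      map_id_down(&ns->uid_map, 1000)   -> 101000
 *      map_id_up(&ns->uid_map, 101000)   -> 1000
 *      map_id_down(&ns->uid_map, 70000)  -> (u32) -1   (outside the extent)
 *
 * map_id_range_down() additionally requires the whole [id, id + count - 1]
 * range to fall inside one extent.
 */
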
/**
 * make_kuid - Map a user-namespace uid pair into a kuid.
 * @ns:  User namespace that the uid is in
 * @uid: User identifier
 *
 * Maps a user-namespace uid pair into a kernel internal kuid,
 * and returns that kuid.
 *
 * When there is no mapping defined for the user-namespace uid
 * pair INVALID_UID is returned.  Callers are expected to test
 * for and handle INVALID_UID being returned.  INVALID_UID
 * may be tested for using uid_valid().
 */
kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
{
        /* Map the uid to a global kernel uid */
        return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
}
EXPORT_SYMBOL(make_kuid);

/**
 * from_kuid - Create a uid from a kuid user-namespace pair.
 * @targ: The user namespace we want a uid in.
 * @kuid: The kernel internal uid to start with.
 *
 * Map @kuid into the user-namespace specified by @targ and
 * return the resulting uid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kuid has no mapping in @targ (uid_t)-1 is returned.
 */
uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
{
        /* Map the uid from a global kernel uid */
        return map_id_up(&targ->uid_map, __kuid_val(kuid));
}
EXPORT_SYMBOL(from_kuid);

/**
 * from_kuid_munged - Create a uid from a kuid user-namespace pair.
 * @targ: The user namespace we want a uid in.
 * @kuid: The kernel internal uid to start with.
 *
 * Map @kuid into the user-namespace specified by @targ and
 * return the resulting uid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kuid, from_kuid_munged never fails and always
 * returns a valid uid.  This makes from_kuid_munged appropriate
 * for use in syscalls like stat and getuid where failing the
 * system call and failing to provide a valid uid are not options.
 *
 * If @kuid has no mapping in @targ overflowuid is returned.
 */
uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
{
        uid_t uid;
        uid = from_kuid(targ, kuid);

        if (uid == (uid_t) -1)
                uid = overflowuid;
        return uid;
}
EXPORT_SYMBOL(from_kuid_munged);

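/*
 * Usage sketch (illustrative only; none of this is called from this file):
 * a typical round trip through the helpers above when an id crosses the
 * userspace/kernel boundary.
 *
 *      kuid_t kuid = make_kuid(current_user_ns(), uid);
 *      if (!uid_valid(kuid))
 *              return -EINVAL;
 *      ...
 *      uid = from_kuid_munged(current_user_ns(), kuid);
 *
 * from_kuid_munged() is used on the way back out so that an unmapped kuid
 * degrades to overflowuid instead of failing the system call.
 */
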
/**
 * make_kgid - Map a user-namespace gid pair into a kgid.
 * @ns:  User namespace that the gid is in
 * @gid: Group identifier
 *
 * Maps a user-namespace gid pair into a kernel internal kgid,
 * and returns that kgid.
 *
 * When there is no mapping defined for the user-namespace gid
 * pair INVALID_GID is returned.  Callers are expected to test
 * for and handle INVALID_GID being returned.  INVALID_GID may be
 * tested for using gid_valid().
 */
kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
{
        /* Map the gid to a global kernel gid */
        return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
}
EXPORT_SYMBOL(make_kgid);

/**
 * from_kgid - Create a gid from a kgid user-namespace pair.
 * @targ: The user namespace we want a gid in.
 * @kgid: The kernel internal gid to start with.
 *
 * Map @kgid into the user-namespace specified by @targ and
 * return the resulting gid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kgid has no mapping in @targ (gid_t)-1 is returned.
 */
gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
{
        /* Map the gid from a global kernel gid */
        return map_id_up(&targ->gid_map, __kgid_val(kgid));
}
EXPORT_SYMBOL(from_kgid);

/**
 * from_kgid_munged - Create a gid from a kgid user-namespace pair.
 * @targ: The user namespace we want a gid in.
 * @kgid: The kernel internal gid to start with.
 *
 * Map @kgid into the user-namespace specified by @targ and
 * return the resulting gid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kgid, from_kgid_munged never fails and always
 * returns a valid gid.  This makes from_kgid_munged appropriate
 * for use in syscalls like stat and getgid where failing the
 * system call and failing to provide a valid gid are not options.
 *
 * If @kgid has no mapping in @targ overflowgid is returned.
 */
gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
{
        gid_t gid;
        gid = from_kgid(targ, kgid);

        if (gid == (gid_t) -1)
                gid = overflowgid;
        return gid;
}
EXPORT_SYMBOL(from_kgid_munged);

/**
 * make_kprojid - Map a user-namespace projid pair into a kprojid.
 * @ns:     User namespace that the projid is in
 * @projid: Project identifier
 *
 * Maps a user-namespace projid pair into a kernel internal kprojid,
 * and returns that kprojid.
 *
 * When there is no mapping defined for the user-namespace projid
 * pair INVALID_PROJID is returned.  Callers are expected to test
 * for and handle INVALID_PROJID being returned.  INVALID_PROJID
 * may be tested for using projid_valid().
 */
kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
{
        /* Map the projid to a global kernel projid */
        return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
}
EXPORT_SYMBOL(make_kprojid);

/**
 * from_kprojid - Create a projid from a kprojid user-namespace pair.
 * @targ:    The user namespace we want a projid in.
 * @kprojid: The kernel internal project identifier to start with.
 *
 * Map @kprojid into the user-namespace specified by @targ and
 * return the resulting projid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kprojid has no mapping in @targ (projid_t)-1 is returned.
 */
projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
{
        /* Map the projid from a global kernel projid */
        return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
}
EXPORT_SYMBOL(from_kprojid);

/**
 * from_kprojid_munged - Create a projid from a kprojid user-namespace pair.
 * @targ:    The user namespace we want a projid in.
 * @kprojid: The kernel internal projid to start with.
 *
 * Map @kprojid into the user-namespace specified by @targ and
 * return the resulting projid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kprojid, from_kprojid_munged never fails and always
 * returns a valid projid.  This makes from_kprojid_munged
 * appropriate for use in syscalls like stat where failing the
 * system call and failing to provide a valid projid are not options.
 *
 * If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
 */
projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
{
        projid_t projid;
        projid = from_kprojid(targ, kprojid);

        if (projid == (projid_t) -1)
                projid = OVERFLOW_PROJID;
        return projid;
}
EXPORT_SYMBOL(from_kprojid_munged);

static int uid_m_show(struct seq_file *seq, void *v)
{
        struct user_namespace *ns = seq->private;
        struct uid_gid_extent *extent = v;
        struct user_namespace *lower_ns;
        uid_t lower;

        lower_ns = seq_user_ns(seq);
        if ((lower_ns == ns) && lower_ns->parent)
                lower_ns = lower_ns->parent;

        lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));

        seq_printf(seq, "%10u %10u %10u\n",
                   extent->first,
                   lower,
                   extent->count);

        return 0;
}

static int gid_m_show(struct seq_file *seq, void *v)
{
        struct user_namespace *ns = seq->private;
        struct uid_gid_extent *extent = v;
        struct user_namespace *lower_ns;
        gid_t lower;

        lower_ns = seq_user_ns(seq);
        if ((lower_ns == ns) && lower_ns->parent)
                lower_ns = lower_ns->parent;

        lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));

        seq_printf(seq, "%10u %10u %10u\n",
                   extent->first,
                   lower,
                   extent->count);

        return 0;
}

static int projid_m_show(struct seq_file *seq, void *v)
{
        struct user_namespace *ns = seq->private;
        struct uid_gid_extent *extent = v;
        struct user_namespace *lower_ns;
        projid_t lower;

        lower_ns = seq_user_ns(seq);
        if ((lower_ns == ns) && lower_ns->parent)
                lower_ns = lower_ns->parent;

        lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));

        seq_printf(seq, "%10u %10u %10u\n",
                   extent->first,
                   lower,
                   extent->count);

        return 0;
}

static void *m_start(struct seq_file *seq, loff_t *ppos,
                     struct uid_gid_map *map)
{
        struct uid_gid_extent *extent = NULL;
        loff_t pos = *ppos;

        if (pos < map->nr_extents)
                extent = &map->extent[pos];

        return extent;
}

static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
{
        struct user_namespace *ns = seq->private;

        return m_start(seq, ppos, &ns->uid_map);
}

static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
{
        struct user_namespace *ns = seq->private;

        return m_start(seq, ppos, &ns->gid_map);
}

static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
{
        struct user_namespace *ns = seq->private;

        return m_start(seq, ppos, &ns->projid_map);
}

static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return seq->op->start(seq, pos);
}

static void m_stop(struct seq_file *seq, void *v)
{
        return;
}

const struct seq_operations proc_uid_seq_operations = {
        .start = uid_m_start,
        .stop = m_stop,
        .next = m_next,
        .show = uid_m_show,
};

const struct seq_operations proc_gid_seq_operations = {
        .start = gid_m_start,
        .stop = m_stop,
        .next = m_next,
        .show = gid_m_show,
};

const struct seq_operations proc_projid_seq_operations = {
        .start = projid_m_start,
        .stop = m_stop,
        .next = m_next,
        .show = projid_m_show,
};

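/*
 * Illustrative output: reading /proc/<pid>/uid_map with the operations
 * above prints one line per extent in "first lower count" form, e.g.
 *
 *               0     100000      65536
 *
 * where the middle column is the first id as seen from the reader's
 * (usually the parent) user namespace.
 */
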
static bool mappings_overlap(struct uid_gid_map *new_map,
                             struct uid_gid_extent *extent)
{
        u32 upper_first, lower_first, upper_last, lower_last;
        unsigned idx;

        upper_first = extent->first;
        lower_first = extent->lower_first;
        upper_last = upper_first + extent->count - 1;
        lower_last = lower_first + extent->count - 1;

        for (idx = 0; idx < new_map->nr_extents; idx++) {
                u32 prev_upper_first, prev_lower_first;
                u32 prev_upper_last, prev_lower_last;
                struct uid_gid_extent *prev;

                prev = &new_map->extent[idx];

                prev_upper_first = prev->first;
                prev_lower_first = prev->lower_first;
                prev_upper_last = prev_upper_first + prev->count - 1;
                prev_lower_last = prev_lower_first + prev->count - 1;

                /* Does the upper range intersect a previous extent? */
                if ((prev_upper_first <= upper_last) &&
                    (prev_upper_last >= upper_first))
                        return true;

                /* Does the lower range intersect a previous extent? */
                if ((prev_lower_first <= lower_last) &&
                    (prev_lower_last >= lower_first))
                        return true;
        }
        return false;
}

static ssize_t map_write(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos,
                         int cap_setid,
                         struct uid_gid_map *map,
                         struct uid_gid_map *parent_map)
{
        struct seq_file *seq = file->private_data;
        struct user_namespace *ns = seq->private;
        struct uid_gid_map new_map;
        unsigned idx;
        struct uid_gid_extent *extent = NULL;
        char *kbuf = NULL, *pos, *next_line;
        ssize_t ret = -EINVAL;

        /*
         * The userns_state_mutex serializes all writes to any given map.
         *
         * Any map is only ever written once.
         *
         * An id map fits within 1 cache line on most architectures.
         *
         * On read nothing needs to be done unless you are on an
         * architecture with a crazy cache coherency model like alpha.
         *
         * There is a one time data dependency between reading the
         * count of the extents and the values of the extents.  The
         * desired behavior is to see the values of the extents that
         * were written before the count of the extents.
         *
         * To achieve this smp_wmb() is used to guarantee the write
         * order and smp_rmb() guarantees that we don't have crazy
         * architectures returning stale data.
         */
        mutex_lock(&userns_state_mutex);

        ret = -EPERM;
        /* Only allow one successful write to the map */
        if (map->nr_extents != 0)
                goto out;

        /*
         * Adjusting namespace settings requires capabilities on the target.
         */
        if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
                goto out;

        /* Only allow < page size writes at the beginning of the file */
        ret = -EINVAL;
        if ((*ppos != 0) || (count >= PAGE_SIZE))
                goto out;

        /* Slurp in the user data */
        kbuf = memdup_user_nul(buf, count);
        if (IS_ERR(kbuf)) {
                ret = PTR_ERR(kbuf);
                kbuf = NULL;
                goto out;
        }

        /* Parse the user data */
        ret = -EINVAL;
        pos = kbuf;
        new_map.nr_extents = 0;
        for (; pos; pos = next_line) {
                extent = &new_map.extent[new_map.nr_extents];

                /* Find the end of line and ensure I don't look past it */
                next_line = strchr(pos, '\n');
                if (next_line) {
                        *next_line = '\0';
                        next_line++;
                        if (*next_line == '\0')
                                next_line = NULL;
                }

                pos = skip_spaces(pos);
                extent->first = simple_strtoul(pos, &pos, 10);
                if (!isspace(*pos))
                        goto out;

                pos = skip_spaces(pos);
                extent->lower_first = simple_strtoul(pos, &pos, 10);
                if (!isspace(*pos))
                        goto out;

                pos = skip_spaces(pos);
                extent->count = simple_strtoul(pos, &pos, 10);
                if (*pos && !isspace(*pos))
                        goto out;

                /* Verify there is no trailing junk on the line */
                pos = skip_spaces(pos);
                if (*pos != '\0')
                        goto out;

                /* Verify we have been given valid starting values */
                if ((extent->first == (u32) -1) ||
                    (extent->lower_first == (u32) -1))
                        goto out;

                /* Verify count is not zero and does not cause the
                 * extent to wrap
                 */
                if ((extent->first + extent->count) <= extent->first)
                        goto out;
                if ((extent->lower_first + extent->count) <=
                     extent->lower_first)
                        goto out;

                /* Do the ranges in extent overlap any previous extents? */
                if (mappings_overlap(&new_map, extent))
                        goto out;

                new_map.nr_extents++;

                /* Fail if the file contains too many extents */
                if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) &&
                    (next_line != NULL))
                        goto out;
        }
        /* Be very certain the new map actually exists */
        if (new_map.nr_extents == 0)
                goto out;

        ret = -EPERM;
        /* Validate the user is allowed to use the user ids being mapped to. */
        if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
                goto out;

        /* Map the lower ids from the parent user namespace to the
         * kernel global id space.
         */
        for (idx = 0; idx < new_map.nr_extents; idx++) {
                u32 lower_first;
                extent = &new_map.extent[idx];

                lower_first = map_id_range_down(parent_map,
                                                extent->lower_first,
                                                extent->count);

                /* Fail if we can not map the specified extent to
                 * the kernel global id space.
                 */
                if (lower_first == (u32) -1)
                        goto out;

                extent->lower_first = lower_first;
        }

        /* Install the map */
        memcpy(map->extent, new_map.extent,
               new_map.nr_extents*sizeof(new_map.extent[0]));
        smp_wmb();
        map->nr_extents = new_map.nr_extents;

        *ppos = count;
        ret = count;
out:
        mutex_unlock(&userns_state_mutex);
        kfree(kbuf);
        return ret;
}

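/*
 * Illustrative input (not kernel code): a sufficiently privileged writer
 * (see new_idmap_permitted() below) establishes a mapping by writing lines
 * of the form "<first> <lower_first> <count>" to /proc/<pid>/uid_map or
 * gid_map, e.g.
 *
 *      0 100000 65536
 *      65536 1000 1
 *
 * map_write() above accepts at most UID_GID_MAP_MAX_EXTENTS such lines in
 * a single write of less than PAGE_SIZE bytes, rejects overlapping
 * extents, and allows only one successful write per map.
 */
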
ssize_t proc_uid_map_write(struct file *file, const char __user *buf,
                           size_t size, loff_t *ppos)
{
        struct seq_file *seq = file->private_data;
        struct user_namespace *ns = seq->private;
        struct user_namespace *seq_ns = seq_user_ns(seq);

        if (!ns->parent)
                return -EPERM;

        if ((seq_ns != ns) && (seq_ns != ns->parent))
                return -EPERM;

        return map_write(file, buf, size, ppos, CAP_SETUID,
                         &ns->uid_map, &ns->parent->uid_map);
}

ssize_t proc_gid_map_write(struct file *file, const char __user *buf,
                           size_t size, loff_t *ppos)
{
        struct seq_file *seq = file->private_data;
        struct user_namespace *ns = seq->private;
        struct user_namespace *seq_ns = seq_user_ns(seq);

        if (!ns->parent)
                return -EPERM;

        if ((seq_ns != ns) && (seq_ns != ns->parent))
                return -EPERM;

        return map_write(file, buf, size, ppos, CAP_SETGID,
                         &ns->gid_map, &ns->parent->gid_map);
}

ssize_t proc_projid_map_write(struct file *file, const char __user *buf,
                              size_t size, loff_t *ppos)
{
        struct seq_file *seq = file->private_data;
        struct user_namespace *ns = seq->private;
        struct user_namespace *seq_ns = seq_user_ns(seq);

        if (!ns->parent)
                return -EPERM;

        if ((seq_ns != ns) && (seq_ns != ns->parent))
                return -EPERM;

        /* Anyone can set any valid project id, no capability needed */
        return map_write(file, buf, size, ppos, -1,
                         &ns->projid_map, &ns->parent->projid_map);
}

static bool new_idmap_permitted(const struct file *file,
                                struct user_namespace *ns, int cap_setid,
                                struct uid_gid_map *new_map)
{
        const struct cred *cred = file->f_cred;
        /* Don't allow mappings that would allow anything that wouldn't
         * be allowed without the establishment of unprivileged mappings.
         */
        if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
            uid_eq(ns->owner, cred->euid)) {
                u32 id = new_map->extent[0].lower_first;
                if (cap_setid == CAP_SETUID) {
                        kuid_t uid = make_kuid(ns->parent, id);
                        if (uid_eq(uid, cred->euid))
                                return true;
                } else if (cap_setid == CAP_SETGID) {
                        kgid_t gid = make_kgid(ns->parent, id);
                        if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
                            gid_eq(gid, cred->egid))
                                return true;
                }
        }

        /* Allow anyone to set a mapping that doesn't require privilege */
        if (!cap_valid(cap_setid))
                return true;

        /* Allow the specified ids if we have the appropriate capability
         * (CAP_SETUID or CAP_SETGID) over the parent user namespace,
         * and the opener of the id file also had the appropriate capability.
         */
        if (ns_capable(ns->parent, cap_setid) &&
            file_ns_capable(file, ns->parent, cap_setid))
                return true;

        return false;
}

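/*
 * Illustrative example: the unprivileged case accepted above is a single
 * extent of length one that maps the creator's own id, e.g. a process
 * with euid 1000 that created the namespace may write
 *
 *      0 1000 1
 *
 * to its own uid_map, making uid 0 inside the namespace correspond to its
 * euid 1000 outside.  The analogous gid_map write is only accepted once
 * setgroups has been denied (see below).
 */
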
int proc_setgroups_show(struct seq_file *seq, void *v)
{
        struct user_namespace *ns = seq->private;
        unsigned long userns_flags = ACCESS_ONCE(ns->flags);

        seq_printf(seq, "%s\n",
                   (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
                   "allow" : "deny");
        return 0;
}

ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
                             size_t count, loff_t *ppos)
{
        struct seq_file *seq = file->private_data;
        struct user_namespace *ns = seq->private;
        char kbuf[8], *pos;
        bool setgroups_allowed;
        ssize_t ret;

        /* Only allow a very narrow range of strings to be written */
        ret = -EINVAL;
        if ((*ppos != 0) || (count >= sizeof(kbuf)))
                goto out;

        /* What was written? */
        ret = -EFAULT;
        if (copy_from_user(kbuf, buf, count))
                goto out;
        kbuf[count] = '\0';
        pos = kbuf;

        /* What is being requested? */
        ret = -EINVAL;
        if (strncmp(pos, "allow", 5) == 0) {
                pos += 5;
                setgroups_allowed = true;
        }
        else if (strncmp(pos, "deny", 4) == 0) {
                pos += 4;
                setgroups_allowed = false;
        }
        else
                goto out;

        /* Verify there is no trailing junk on the line */
        pos = skip_spaces(pos);
        if (*pos != '\0')
                goto out;

        ret = -EPERM;
        mutex_lock(&userns_state_mutex);
        if (setgroups_allowed) {
                /* Enabling setgroups after setgroups has been disabled
                 * is not allowed.
                 */
                if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
                        goto out_unlock;
        } else {
                /* Permanently disabling setgroups after setgroups has
                 * been enabled by writing the gid_map is not allowed.
                 */
                if (ns->gid_map.nr_extents != 0)
                        goto out_unlock;
                ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
        }
        mutex_unlock(&userns_state_mutex);

        /* Report a successful write */
        *ppos = count;
        ret = count;
out:
        return ret;
out_unlock:
        mutex_unlock(&userns_state_mutex);
        goto out;
}

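/*
 * Illustrative ordering constraints enforced above for
 * /proc/<pid>/setgroups:
 *
 *      write "deny"  - permitted only while gid_map is still unset
 *      write "allow" - permitted only if setgroups has not been denied
 *
 * so "deny" must be written before the first gid_map write, and the
 * denial cannot be reverted afterwards.
 */
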
bool userns_may_setgroups(const struct user_namespace *ns)
{
        bool allowed;

        mutex_lock(&userns_state_mutex);
        /* It is not safe to use setgroups until a gid mapping in
         * the user namespace has been established.
         */
        allowed = ns->gid_map.nr_extents != 0;
        /* Is setgroups allowed? */
        allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
        mutex_unlock(&userns_state_mutex);

        return allowed;
}

/*
 * Returns true if @ns is the same namespace as or a descendant of
 * @target_ns.
 */
bool current_in_userns(const struct user_namespace *target_ns)
{
        struct user_namespace *ns;
        for (ns = current_user_ns(); ns; ns = ns->parent) {
                if (ns == target_ns)
                        return true;
        }
        return false;
}

static inline struct user_namespace *to_user_ns(struct ns_common *ns)
{
        return container_of(ns, struct user_namespace, ns);
}

static struct ns_common *userns_get(struct task_struct *task)
{
        struct user_namespace *user_ns;

        rcu_read_lock();
        user_ns = get_user_ns(__task_cred(task)->user_ns);
        rcu_read_unlock();

        return user_ns ? &user_ns->ns : NULL;
}

static void userns_put(struct ns_common *ns)
{
        put_user_ns(to_user_ns(ns));
}

static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
        struct user_namespace *user_ns = to_user_ns(ns);
        struct cred *cred;

        /* Don't allow gaining capabilities by reentering
         * the same user namespace.
         */
        if (user_ns == current_user_ns())
                return -EINVAL;

        /* Tasks that share a thread group must share a user namespace */
        if (!thread_group_empty(current))
                return -EINVAL;

        if (current->fs->users != 1)
                return -EINVAL;

        if (!ns_capable(user_ns, CAP_SYS_ADMIN))
                return -EPERM;

        cred = prepare_creds();
        if (!cred)
                return -ENOMEM;

        put_user_ns(cred->user_ns);
        set_cred_user_ns(cred, get_user_ns(user_ns));

        return commit_creds(cred);
}

const struct proc_ns_operations userns_operations = {
        .name = "user",
        .type = CLONE_NEWUSER,
        .get = userns_get,
        .put = userns_put,
        .install = userns_install,
};

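/*
 * Usage sketch (illustrative, not part of this file): userns_install() is
 * reached via setns(2) on a user namespace file descriptor, e.g.
 *
 *      int fd = open("/proc/1234/ns/user", O_RDONLY);
 *      setns(fd, CLONE_NEWUSER);
 *
 * which succeeds only for a single-threaded caller that does not share
 * its fs_struct and has CAP_SYS_ADMIN in the target namespace.
 */
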
static __init int user_namespaces_init(void)
{
        user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
        return 0;
}
subsys_initcall(user_namespaces_init);