// SPDX-License-Identifier: GPL-2.0-only

#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/highuid.h>
#include <linux/cred.h>
#include <linux/securebits.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/projid.h>
#include <linux/fs_struct.h>
#include <linux/bsearch.h>
#include <linux/sort.h>

/*
 * sysctl determining whether unprivileged users may unshare a new
 * userns. Allowed by default.
 */
int unprivileged_userns_clone = 1;

static struct kmem_cache *user_ns_cachep __read_mostly;
static DEFINE_MUTEX(userns_state_mutex);

static bool new_idmap_permitted(const struct file *file,
				struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *map);
static void free_user_ns(struct work_struct *work);

static struct ucounts *inc_user_namespaces(struct user_namespace *ns, kuid_t uid)
{
	return inc_ucount(ns, uid, UCOUNT_USER_NAMESPACES);
}

static void dec_user_namespaces(struct ucounts *ucounts)
{
	return dec_ucount(ucounts, UCOUNT_USER_NAMESPACES);
}

static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
{
	/* Start with the same capabilities as init but useless for doing
	 * anything as the capabilities are bound to the new user namespace.
	 */
	cred->securebits = SECUREBITS_DEFAULT;
	cred->cap_inheritable = CAP_EMPTY_SET;
	cred->cap_permitted = CAP_FULL_SET;
	cred->cap_effective = CAP_FULL_SET;
	cred->cap_ambient = CAP_EMPTY_SET;
	cred->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
	key_put(cred->request_key_auth);
	cred->request_key_auth = NULL;
#endif
	/* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
	cred->user_ns = user_ns;
}

/*
 * Create a new user namespace, deriving the creator from the user in the
 * passed credentials, and replacing that user with the new root user for the
 * new namespace.
 *
 * This is called by copy_creds(), which will finish setting the target task's
 * credentials.
 */
int create_user_ns(struct cred *new)
{
	struct user_namespace *ns, *parent_ns = new->user_ns;
	kuid_t owner = new->euid;
	kgid_t group = new->egid;
	struct ucounts *ucounts;
	int ret, i;

	ret = -ENOSPC;
	if (parent_ns->level > 32)
		goto fail;

	ucounts = inc_user_namespaces(parent_ns, owner);
	if (!ucounts)
		goto fail;

	/*
	 * Verify that we cannot violate the policy of which files
	 * may be accessed that is specified by the root directory,
	 * by verifying that the root directory is at the root of the
	 * mount namespace which allows all files to be accessed.
	 */
	ret = -EPERM;
	if (current_chrooted())
		goto fail_dec;

	/* The creator needs a mapping in the parent user namespace
	 * or else we won't be able to reasonably tell userspace who
	 * created a user_namespace.
	 */
	ret = -EPERM;
	if (!kuid_has_mapping(parent_ns, owner) ||
	    !kgid_has_mapping(parent_ns, group))
		goto fail_dec;

	ret = -ENOMEM;
	ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
	if (!ns)
		goto fail_dec;

	ns->parent_could_setfcap = cap_raised(new->cap_effective, CAP_SETFCAP);
	ret = ns_alloc_inum(&ns->ns);
	if (ret)
		goto fail_free;
	ns->ns.ops = &userns_operations;

	refcount_set(&ns->ns.count, 1);
	/* Leave the new->user_ns reference with the new user namespace. */
	ns->parent = parent_ns;
	ns->level = parent_ns->level + 1;
	ns->owner = owner;
	ns->group = group;
	INIT_WORK(&ns->work, free_user_ns);
	for (i = 0; i < UCOUNT_COUNTS; i++) {
		ns->ucount_max[i] = INT_MAX;
	}
	ns->ucounts = ucounts;

	/* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
	mutex_lock(&userns_state_mutex);
	ns->flags = parent_ns->flags;
	mutex_unlock(&userns_state_mutex);

#ifdef CONFIG_KEYS
	INIT_LIST_HEAD(&ns->keyring_name_list);
	init_rwsem(&ns->keyring_sem);
#endif
	ret = -ENOMEM;
	if (!setup_userns_sysctls(ns))
		goto fail_keyring;

	set_cred_user_ns(new, ns);
	return 0;
fail_keyring:
#ifdef CONFIG_PERSISTENT_KEYRINGS
	key_put(ns->persistent_keyring_register);
#endif
	ns_free_inum(&ns->ns);
fail_free:
	kmem_cache_free(user_ns_cachep, ns);
fail_dec:
	dec_user_namespaces(ucounts);
fail:
	return ret;
}

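/*
 * A minimal userspace sketch (illustrative only, abbreviated error
 * handling) of the path that ends in create_user_ns(): unsharing a
 * user namespace and observing the still-unmapped ids.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		if (unshare(CLONE_NEWUSER) < 0) {
 *			perror("unshare");
 *			return 1;
 *		}
 *		// Until an idmapping is written, ids read back as the
 *		// overflow ids (typically 65534).
 *		printf("uid in new ns: %u\n", getuid());
 *		return 0;
 *	}
 */
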
int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
{
	struct cred *cred;
	int err = -ENOMEM;

	if (!(unshare_flags & CLONE_NEWUSER))
		return 0;

	cred = prepare_creds();
	if (cred) {
		err = create_user_ns(cred);
		if (err)
			put_cred(cred);
		else
			*new_cred = cred;
	}

	return err;
}

static void free_user_ns(struct work_struct *work)
{
	struct user_namespace *parent, *ns =
		container_of(work, struct user_namespace, work);

	do {
		struct ucounts *ucounts = ns->ucounts;
		parent = ns->parent;
		if (ns->gid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
			kfree(ns->gid_map.forward);
			kfree(ns->gid_map.reverse);
		}
		if (ns->uid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
			kfree(ns->uid_map.forward);
			kfree(ns->uid_map.reverse);
		}
		if (ns->projid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
			kfree(ns->projid_map.forward);
			kfree(ns->projid_map.reverse);
		}
		retire_userns_sysctls(ns);
		key_free_user_ns(ns);
		ns_free_inum(&ns->ns);
		kmem_cache_free(user_ns_cachep, ns);
		dec_user_namespaces(ucounts);
		ns = parent;
	} while (refcount_dec_and_test(&parent->ns.count));
}

void __put_user_ns(struct user_namespace *ns)
{
	schedule_work(&ns->work);
}
EXPORT_SYMBOL(__put_user_ns);

/**
 * idmap_key struct holds the information necessary to find an idmapping in a
 * sorted idmap array. It is passed to cmp_map_id() as first argument.
 */
struct idmap_key {
	bool map_up; /* true -> id from kid; false -> kid from id */
	u32 id; /* id to find */
	u32 count; /* == 0 unless used with map_id_range_down() */
};

/**
 * cmp_map_id - Function to be passed to bsearch() to find the requested
 * idmapping. Expects struct idmap_key to be passed via @k.
 */
static int cmp_map_id(const void *k, const void *e)
{
	u32 first, last, id2;
	const struct idmap_key *key = k;
	const struct uid_gid_extent *el = e;

	id2 = key->id + key->count - 1;

	/* handle map_id_{down,up}() */
	if (key->map_up)
		first = el->lower_first;
	else
		first = el->first;

	last = first + el->count - 1;

	if (key->id >= first && key->id <= last &&
	    (id2 >= first && id2 <= last))
		return 0;

	if (key->id < first || id2 < first)
		return -1;

	return 1;
}

/**
 * map_id_range_down_max - Find idmap via binary search in ordered idmap array.
 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static struct uid_gid_extent *
map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
{
	struct idmap_key key;

	key.map_up = false;
	key.count = count;
	key.id = id;

	return bsearch(&key, map->forward, extents,
		       sizeof(struct uid_gid_extent), cmp_map_id);
}

/**
 * map_id_range_down_base - Find idmap via linear search in static extent array.
 * Can only be called if number of mappings is less than or equal to
 * UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static struct uid_gid_extent *
map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
{
	unsigned idx;
	u32 first, last, id2;

	id2 = id + count - 1;

	/* Find the matching extent */
	for (idx = 0; idx < extents; idx++) {
		first = map->extent[idx].first;
		last = first + map->extent[idx].count - 1;
		if (id >= first && id <= last &&
		    (id2 >= first && id2 <= last))
			return &map->extent[idx];
	}
	return NULL;
}

static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
{
	struct uid_gid_extent *extent;
	unsigned extents = map->nr_extents;
	smp_rmb();

	if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
		extent = map_id_range_down_base(extents, map, id, count);
	else
		extent = map_id_range_down_max(extents, map, id, count);

	/* Map the id or note failure */
	if (extent)
		id = (id - extent->first) + extent->lower_first;
	else
		id = (u32) -1;

	return id;
}

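/*
 * Worked example for map_id_range_down() (illustrative): with a single
 * extent { .first = 0, .lower_first = 100000, .count = 65536 }, looking
 * up id 1000 (count 1) matches the extent, since 0 <= 1000 <= 65535,
 * and returns (1000 - 0) + 100000 = 101000. Looking up id 70000 matches
 * no extent and returns (u32) -1.
 */
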
static u32 map_id_down(struct uid_gid_map *map, u32 id)
{
	return map_id_range_down(map, id, 1);
}

/**
 * map_id_up_base - Find idmap via linear search in static extent array.
 * Can only be called if number of mappings is less than or equal to
 * UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static struct uid_gid_extent *
map_id_up_base(unsigned extents, struct uid_gid_map *map, u32 id)
{
	unsigned idx;
	u32 first, last;

	/* Find the matching extent */
	for (idx = 0; idx < extents; idx++) {
		first = map->extent[idx].lower_first;
		last = first + map->extent[idx].count - 1;
		if (id >= first && id <= last)
			return &map->extent[idx];
	}
	return NULL;
}

/**
 * map_id_up_max - Find idmap via binary search in ordered idmap array.
 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static struct uid_gid_extent *
map_id_up_max(unsigned extents, struct uid_gid_map *map, u32 id)
{
	struct idmap_key key;

	key.map_up = true;
	key.count = 1;
	key.id = id;

	return bsearch(&key, map->reverse, extents,
		       sizeof(struct uid_gid_extent), cmp_map_id);
}

static u32 map_id_up(struct uid_gid_map *map, u32 id)
{
	struct uid_gid_extent *extent;
	unsigned extents = map->nr_extents;
	smp_rmb();

	if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
		extent = map_id_up_base(extents, map, id);
	else
		extent = map_id_up_max(extents, map, id);

	/* Map the id or note failure */
	if (extent)
		id = (id - extent->lower_first) + extent->first;
	else
		id = (u32) -1;

	return id;
}

/**
 * make_kuid - Map a user-namespace uid pair into a kuid.
 * @ns:  User namespace that the uid is in
 * @uid: User identifier
 *
 * Maps a user-namespace uid pair into a kernel internal kuid,
 * and returns that kuid.
 *
 * When there is no mapping defined for the user-namespace uid
 * pair INVALID_UID is returned. Callers are expected to test
 * for and handle INVALID_UID being returned. INVALID_UID
 * may be tested for using uid_valid().
 */
kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
{
	/* Map the uid to a global kernel uid */
	return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
}
EXPORT_SYMBOL(make_kuid);

/**
 * from_kuid - Create a uid from a kuid user-namespace pair.
 * @targ: The user namespace we want a uid in.
 * @kuid: The kernel internal uid to start with.
 *
 * Map @kuid into the user-namespace specified by @targ and
 * return the resulting uid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kuid has no mapping in @targ (uid_t)-1 is returned.
 */
uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
{
	/* Map the uid from a global kernel uid */
	return map_id_up(&targ->uid_map, __kuid_val(kuid));
}
EXPORT_SYMBOL(from_kuid);

/**
 * from_kuid_munged - Create a uid from a kuid user-namespace pair.
 * @targ: The user namespace we want a uid in.
 * @kuid: The kernel internal uid to start with.
 *
 * Map @kuid into the user-namespace specified by @targ and
 * return the resulting uid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kuid, from_kuid_munged never fails and always
 * returns a valid uid. This makes from_kuid_munged appropriate
 * for use in syscalls like stat and getuid where failing the
 * system call and failing to provide a valid uid are not
 * options.
 *
 * If @kuid has no mapping in @targ overflowuid is returned.
 */
uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
{
	uid_t uid;
	uid = from_kuid(targ, kuid);

	if (uid == (uid_t) -1)
		uid = overflowuid;
	return uid;
}
EXPORT_SYMBOL(from_kuid_munged);

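/*
 * A short sketch of the usual conversion pattern at a kernel/userspace
 * boundary (on_disk_uid is a hypothetical value, not a name from this
 * file): convert the raw value to a kuid_t with make_kuid(), reject
 * unmapped ids, and convert back for reporting with from_kuid_munged().
 *
 *	kuid_t kuid = make_kuid(current_user_ns(), on_disk_uid);
 *	if (!uid_valid(kuid))
 *		return -EINVAL;
 *	// ... store and use the kuid_t internally ...
 *	uid = from_kuid_munged(current_user_ns(), kuid);
 */
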
/**
 * make_kgid - Map a user-namespace gid pair into a kgid.
 * @ns:  User namespace that the gid is in
 * @gid: Group identifier
 *
 * Maps a user-namespace gid pair into a kernel internal kgid,
 * and returns that kgid.
 *
 * When there is no mapping defined for the user-namespace gid
 * pair INVALID_GID is returned. Callers are expected to test
 * for and handle INVALID_GID being returned. INVALID_GID may be
 * tested for using gid_valid().
 */
kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
{
	/* Map the gid to a global kernel gid */
	return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
}
EXPORT_SYMBOL(make_kgid);

/**
 * from_kgid - Create a gid from a kgid user-namespace pair.
 * @targ: The user namespace we want a gid in.
 * @kgid: The kernel internal gid to start with.
 *
 * Map @kgid into the user-namespace specified by @targ and
 * return the resulting gid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kgid has no mapping in @targ (gid_t)-1 is returned.
 */
gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
{
	/* Map the gid from a global kernel gid */
	return map_id_up(&targ->gid_map, __kgid_val(kgid));
}
EXPORT_SYMBOL(from_kgid);

/**
 * from_kgid_munged - Create a gid from a kgid user-namespace pair.
 * @targ: The user namespace we want a gid in.
 * @kgid: The kernel internal gid to start with.
 *
 * Map @kgid into the user-namespace specified by @targ and
 * return the resulting gid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kgid, from_kgid_munged never fails and always
 * returns a valid gid. This makes from_kgid_munged appropriate
 * for use in syscalls like stat and getgid where failing the
 * system call and failing to provide a valid gid are not options.
 *
 * If @kgid has no mapping in @targ overflowgid is returned.
 */
gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
{
	gid_t gid;
	gid = from_kgid(targ, kgid);

	if (gid == (gid_t) -1)
		gid = overflowgid;
	return gid;
}
EXPORT_SYMBOL(from_kgid_munged);

/**
 * make_kprojid - Map a user-namespace projid pair into a kprojid.
 * @ns:     User namespace that the projid is in
 * @projid: Project identifier
 *
 * Maps a user-namespace projid pair into a kernel internal kprojid,
 * and returns that kprojid.
 *
 * When there is no mapping defined for the user-namespace projid
 * pair INVALID_PROJID is returned. Callers are expected to test
 * for and handle INVALID_PROJID being returned. INVALID_PROJID
 * may be tested for using projid_valid().
 */
kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
{
	/* Map the projid to a global kernel projid */
	return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
}
EXPORT_SYMBOL(make_kprojid);

/**
 * from_kprojid - Create a projid from a kprojid user-namespace pair.
 * @targ:    The user namespace we want a projid in.
 * @kprojid: The kernel internal project identifier to start with.
 *
 * Map @kprojid into the user-namespace specified by @targ and
 * return the resulting projid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kprojid has no mapping in @targ (projid_t)-1 is returned.
 */
projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
{
	/* Map the projid from a global kernel projid */
	return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
}
EXPORT_SYMBOL(from_kprojid);

/**
 * from_kprojid_munged - Create a projid from a kprojid user-namespace pair.
 * @targ:    The user namespace we want a projid in.
 * @kprojid: The kernel internal projid to start with.
 *
 * Map @kprojid into the user-namespace specified by @targ and
 * return the resulting projid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kprojid, from_kprojid_munged never fails and always
 * returns a valid projid. This makes from_kprojid_munged
 * appropriate for use in syscalls like stat where failing the
 * system call and failing to provide a valid projid are not
 * options.
 *
 * If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
 */
projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
{
	projid_t projid;
	projid = from_kprojid(targ, kprojid);

	if (projid == (projid_t) -1)
		projid = OVERFLOW_PROJID;
	return projid;
}
EXPORT_SYMBOL(from_kprojid_munged);


static int uid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	uid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		   extent->first,
		   lower,
		   extent->count);

	return 0;
}

static int gid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	gid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		   extent->first,
		   lower,
		   extent->count);

	return 0;
}

static int projid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	projid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		   extent->first,
		   lower,
		   extent->count);

	return 0;
}

static void *m_start(struct seq_file *seq, loff_t *ppos,
		     struct uid_gid_map *map)
{
	loff_t pos = *ppos;
	unsigned extents = map->nr_extents;
	smp_rmb();

	if (pos >= extents)
		return NULL;

	if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
		return &map->extent[pos];

	return &map->forward[pos];
}

static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->uid_map);
}

static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->gid_map);
}

static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->projid_map);
}

static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return seq->op->start(seq, pos);
}

static void m_stop(struct seq_file *seq, void *v)
{
	return;
}

const struct seq_operations proc_uid_seq_operations = {
	.start = uid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = uid_m_show,
};

const struct seq_operations proc_gid_seq_operations = {
	.start = gid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = gid_m_show,
};

const struct seq_operations proc_projid_seq_operations = {
	.start = projid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = projid_m_show,
};

static bool mappings_overlap(struct uid_gid_map *new_map,
			     struct uid_gid_extent *extent)
{
	u32 upper_first, lower_first, upper_last, lower_last;
	unsigned idx;

	upper_first = extent->first;
	lower_first = extent->lower_first;
	upper_last = upper_first + extent->count - 1;
	lower_last = lower_first + extent->count - 1;

	for (idx = 0; idx < new_map->nr_extents; idx++) {
		u32 prev_upper_first, prev_lower_first;
		u32 prev_upper_last, prev_lower_last;
		struct uid_gid_extent *prev;

		if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
			prev = &new_map->extent[idx];
		else
			prev = &new_map->forward[idx];

		prev_upper_first = prev->first;
		prev_lower_first = prev->lower_first;
		prev_upper_last = prev_upper_first + prev->count - 1;
		prev_lower_last = prev_lower_first + prev->count - 1;

		/* Does the upper range intersect a previous extent? */
		if ((prev_upper_first <= upper_last) &&
		    (prev_upper_last >= upper_first))
			return true;

		/* Does the lower range intersect a previous extent? */
		if ((prev_lower_first <= lower_last) &&
		    (prev_lower_last >= lower_first))
			return true;
	}
	return false;
}

/**
 * insert_extent - Safely insert a new idmap extent into struct uid_gid_map.
 * Takes care to allocate a 4K block of memory if the number of mappings exceeds
 * UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static int insert_extent(struct uid_gid_map *map, struct uid_gid_extent *extent)
{
	struct uid_gid_extent *dest;

	if (map->nr_extents == UID_GID_MAP_MAX_BASE_EXTENTS) {
		struct uid_gid_extent *forward;

		/* Allocate memory for 340 mappings. */
		forward = kmalloc_array(UID_GID_MAP_MAX_EXTENTS,
					sizeof(struct uid_gid_extent),
					GFP_KERNEL);
		if (!forward)
			return -ENOMEM;

		/* Copy over memory. Only set up memory for the forward pointer.
		 * Defer the memory setup for the reverse pointer.
		 */
		memcpy(forward, map->extent,
		       map->nr_extents * sizeof(map->extent[0]));

		map->forward = forward;
		map->reverse = NULL;
	}

	if (map->nr_extents < UID_GID_MAP_MAX_BASE_EXTENTS)
		dest = &map->extent[map->nr_extents];
	else
		dest = &map->forward[map->nr_extents];

	*dest = *extent;
	map->nr_extents++;
	return 0;
}

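/*
 * Size check for the allocation above (assuming the usual definition of
 * struct uid_gid_extent as three u32 fields, i.e. 12 bytes):
 * UID_GID_MAP_MAX_EXTENTS (340) * 12 = 4080 bytes, so the forward array
 * fits in a single 4K allocation, as the insert_extent() comment says.
 */
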
/* cmp function to sort() forward mappings */
static int cmp_extents_forward(const void *a, const void *b)
{
	const struct uid_gid_extent *e1 = a;
	const struct uid_gid_extent *e2 = b;

	if (e1->first < e2->first)
		return -1;

	if (e1->first > e2->first)
		return 1;

	return 0;
}

/* cmp function to sort() reverse mappings */
static int cmp_extents_reverse(const void *a, const void *b)
{
	const struct uid_gid_extent *e1 = a;
	const struct uid_gid_extent *e2 = b;

	if (e1->lower_first < e2->lower_first)
		return -1;

	if (e1->lower_first > e2->lower_first)
		return 1;

	return 0;
}

/**
 * sort_idmaps - Sorts an array of idmap entries.
 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static int sort_idmaps(struct uid_gid_map *map)
{
	if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
		return 0;

	/* Sort forward array. */
	sort(map->forward, map->nr_extents, sizeof(struct uid_gid_extent),
	     cmp_extents_forward, NULL);

	/* Only copy the memory from forward we actually need. */
	map->reverse = kmemdup(map->forward,
			       map->nr_extents * sizeof(struct uid_gid_extent),
			       GFP_KERNEL);
	if (!map->reverse)
		return -ENOMEM;

	/* Sort reverse array. */
	sort(map->reverse, map->nr_extents, sizeof(struct uid_gid_extent),
	     cmp_extents_reverse, NULL);

	return 0;
}

/**
 * verify_root_map() - check the uid 0 mapping
 * @file: idmapping file
 * @map_ns: user namespace of the target process
 * @new_map: requested idmap
 *
 * If a process requests mapping parent uid 0 into the new ns, verify that the
 * process writing the map had the CAP_SETFCAP capability as the target process
 * will be able to write fscaps that are valid in ancestor user namespaces.
 *
 * Return: true if the mapping is allowed, false if not.
 */
static bool verify_root_map(const struct file *file,
			    struct user_namespace *map_ns,
			    struct uid_gid_map *new_map)
{
	int idx;
	const struct user_namespace *file_ns = file->f_cred->user_ns;
	struct uid_gid_extent *extent0 = NULL;

	for (idx = 0; idx < new_map->nr_extents; idx++) {
		if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
			extent0 = &new_map->extent[idx];
		else
			extent0 = &new_map->forward[idx];
		if (extent0->lower_first == 0)
			break;

		extent0 = NULL;
	}

	if (!extent0)
		return true;

	if (map_ns == file_ns) {
		/* The process unshared its ns and is writing to its own
		 * /proc/self/uid_map. User already has full capabilities in
		 * the new namespace. Verify that the parent had CAP_SETFCAP
		 * when it unshared.
		 */
		if (!file_ns->parent_could_setfcap)
			return false;
	} else {
		/* Process p1 is writing to uid_map of p2, who is in a child
		 * user namespace to p1's. Verify that the opener of the map
		 * file has CAP_SETFCAP against the parent of the new map
		 * namespace.
		 */
		if (!file_ns_capable(file, map_ns->parent, CAP_SETFCAP))
			return false;
	}

	return true;
}

static ssize_t map_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos,
			 int cap_setid,
			 struct uid_gid_map *map,
			 struct uid_gid_map *parent_map)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *map_ns = seq->private;
	struct uid_gid_map new_map;
	unsigned idx;
	struct uid_gid_extent extent;
	char *kbuf = NULL, *pos, *next_line;
	ssize_t ret;

	/* Only allow < page size writes at the beginning of the file */
	if ((*ppos != 0) || (count >= PAGE_SIZE))
		return -EINVAL;

	/* Slurp in the user data */
	kbuf = memdup_user_nul(buf, count);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/*
	 * The userns_state_mutex serializes all writes to any given map.
	 *
	 * Any map is only ever written once.
	 *
	 * An id map fits within 1 cache line on most architectures.
	 *
	 * On read nothing needs to be done unless you are on an
	 * architecture with a crazy cache coherency model like alpha.
	 *
	 * There is a one time data dependency between reading the
	 * count of the extents and the values of the extents. The
	 * desired behavior is to see the values of the extents that
	 * were written before the count of the extents.
	 *
	 * To achieve this smp_wmb() is used to guarantee the write
	 * order and smp_rmb() guarantees that we don't have crazy
	 * architectures returning stale data.
	 */
	mutex_lock(&userns_state_mutex);

	memset(&new_map, 0, sizeof(struct uid_gid_map));

	ret = -EPERM;
	/* Only allow one successful write to the map */
	if (map->nr_extents != 0)
		goto out;

	/*
	 * Adjusting namespace settings requires capabilities on the target.
	 */
	if (cap_valid(cap_setid) && !file_ns_capable(file, map_ns, CAP_SYS_ADMIN))
		goto out;

	/* Parse the user data */
	ret = -EINVAL;
	pos = kbuf;
	for (; pos; pos = next_line) {

		/* Find the end of line and ensure I don't look past it */
		next_line = strchr(pos, '\n');
		if (next_line) {
			*next_line = '\0';
			next_line++;
			if (*next_line == '\0')
				next_line = NULL;
		}

		pos = skip_spaces(pos);
		extent.first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent.lower_first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent.count = simple_strtoul(pos, &pos, 10);
		if (*pos && !isspace(*pos))
			goto out;

		/* Verify there is no trailing junk on the line */
		pos = skip_spaces(pos);
		if (*pos != '\0')
			goto out;

		/* Verify we have been given valid starting values */
		if ((extent.first == (u32) -1) ||
		    (extent.lower_first == (u32) -1))
			goto out;

		/* Verify count is not zero and does not cause the
		 * extent to wrap
		 */
		if ((extent.first + extent.count) <= extent.first)
			goto out;
		if ((extent.lower_first + extent.count) <=
		    extent.lower_first)
			goto out;

		/* Do the ranges in extent overlap any previous extents? */
		if (mappings_overlap(&new_map, &extent))
			goto out;

		if ((new_map.nr_extents + 1) == UID_GID_MAP_MAX_EXTENTS &&
		    (next_line != NULL))
			goto out;

		ret = insert_extent(&new_map, &extent);
		if (ret < 0)
			goto out;
		ret = -EINVAL;
	}
	/* Be very certain the new map actually exists */
	if (new_map.nr_extents == 0)
		goto out;

	ret = -EPERM;
	/* Validate the user is allowed to use the ids being mapped to. */
	if (!new_idmap_permitted(file, map_ns, cap_setid, &new_map))
		goto out;

	ret = -EPERM;
	/* Map the lower ids from the parent user namespace to the
	 * kernel global id space.
	 */
	for (idx = 0; idx < new_map.nr_extents; idx++) {
		struct uid_gid_extent *e;
		u32 lower_first;

		if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
			e = &new_map.extent[idx];
		else
			e = &new_map.forward[idx];

		lower_first = map_id_range_down(parent_map,
						e->lower_first,
						e->count);

		/* Fail if we cannot map the specified extent to
		 * the kernel global id space.
		 */
		if (lower_first == (u32) -1)
			goto out;

		e->lower_first = lower_first;
	}

	/*
	 * If we want to use binary search for lookup, this clones the extent
	 * array and sorts both copies.
	 */
	ret = sort_idmaps(&new_map);
	if (ret < 0)
		goto out;

	/* Install the map */
	if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
		memcpy(map->extent, new_map.extent,
		       new_map.nr_extents * sizeof(new_map.extent[0]));
	} else {
		map->forward = new_map.forward;
		map->reverse = new_map.reverse;
	}
	smp_wmb();
	map->nr_extents = new_map.nr_extents;

	*ppos = count;
	ret = count;
out:
	if (ret < 0 && new_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
		kfree(new_map.forward);
		kfree(new_map.reverse);
		map->forward = NULL;
		map->reverse = NULL;
		map->nr_extents = 0;
	}

	mutex_unlock(&userns_state_mutex);
	kfree(kbuf);
	return ret;
}

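/*
 * A userspace sketch of driving map_write() through /proc (illustrative
 * only): a privileged parent writes "0 100000 65536" into a child's
 * uid_map, mapping uids 0..65535 in the child to 100000..165535 in the
 * parent. The whole map must arrive in one write(2) at offset 0, and
 * only one successful write is ever allowed.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	static int write_uid_map(pid_t pid)
 *	{
 *		char path[64];
 *		const char buf[] = "0 100000 65536\n";
 *		int fd;
 *
 *		snprintf(path, sizeof(path), "/proc/%d/uid_map", (int)pid);
 *		fd = open(path, O_WRONLY);
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, buf, strlen(buf)) != (ssize_t)strlen(buf)) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 */
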
ssize_t proc_uid_map_write(struct file *file, const char __user *buf,
			   size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	return map_write(file, buf, size, ppos, CAP_SETUID,
			 &ns->uid_map, &ns->parent->uid_map);
}

ssize_t proc_gid_map_write(struct file *file, const char __user *buf,
			   size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	return map_write(file, buf, size, ppos, CAP_SETGID,
			 &ns->gid_map, &ns->parent->gid_map);
}

ssize_t proc_projid_map_write(struct file *file, const char __user *buf,
			      size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	/* Anyone can set any valid project id; no capability needed */
	return map_write(file, buf, size, ppos, -1,
			 &ns->projid_map, &ns->parent->projid_map);
}

static bool new_idmap_permitted(const struct file *file,
				struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *new_map)
{
	const struct cred *cred = file->f_cred;

	if (cap_setid == CAP_SETUID && !verify_root_map(file, ns, new_map))
		return false;

	/* Don't allow mappings that would allow anything that wouldn't
	 * be allowed without the establishment of unprivileged mappings.
	 */
	if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
	    uid_eq(ns->owner, cred->euid)) {
		u32 id = new_map->extent[0].lower_first;
		if (cap_setid == CAP_SETUID) {
			kuid_t uid = make_kuid(ns->parent, id);
			if (uid_eq(uid, cred->euid))
				return true;
		} else if (cap_setid == CAP_SETGID) {
			kgid_t gid = make_kgid(ns->parent, id);
			if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
			    gid_eq(gid, cred->egid))
				return true;
		}
	}

	/* Allow anyone to set a mapping that doesn't require privilege */
	if (!cap_valid(cap_setid))
		return true;

	/* Allow the specified ids if we have the appropriate capability
	 * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
	 * And the opener of the id file also had the appropriate capability.
	 */
	if (ns_capable(ns->parent, cap_setid) &&
	    file_ns_capable(file, ns->parent, cap_setid))
		return true;

	return false;
}

int proc_setgroups_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	unsigned long userns_flags = READ_ONCE(ns->flags);

	seq_printf(seq, "%s\n",
		   (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
		   "allow" : "deny");
	return 0;
}

ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	char kbuf[8], *pos;
	bool setgroups_allowed;
	ssize_t ret;

	/* Only allow a very narrow range of strings to be written */
	ret = -EINVAL;
	if ((*ppos != 0) || (count >= sizeof(kbuf)))
		goto out;

	/* What was written? */
	ret = -EFAULT;
	if (copy_from_user(kbuf, buf, count))
		goto out;
	kbuf[count] = '\0';
	pos = kbuf;

	/* What is being requested? */
	ret = -EINVAL;
	if (strncmp(pos, "allow", 5) == 0) {
		pos += 5;
		setgroups_allowed = true;
	}
	else if (strncmp(pos, "deny", 4) == 0) {
		pos += 4;
		setgroups_allowed = false;
	}
	else
		goto out;

	/* Verify there is no trailing junk on the line */
	pos = skip_spaces(pos);
	if (*pos != '\0')
		goto out;

	ret = -EPERM;
	mutex_lock(&userns_state_mutex);
	if (setgroups_allowed) {
		/* Enabling setgroups after setgroups has been disabled
		 * is not allowed.
		 */
		if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
			goto out_unlock;
	} else {
		/* Permanently disabling setgroups after setgroups has
		 * been enabled by writing the gid_map is not allowed.
		 */
		if (ns->gid_map.nr_extents != 0)
			goto out_unlock;
		ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
	}
	mutex_unlock(&userns_state_mutex);

	/* Report a successful write */
	*ppos = count;
	ret = count;
out:
	return ret;
out_unlock:
	mutex_unlock(&userns_state_mutex);
	goto out;
}

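/*
 * A sketch of the ordering this enforces for unprivileged users
 * (illustrative only): setgroups must be denied before gid_map is
 * written, because once gid_map is non-empty, disabling setgroups is
 * no longer allowed.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/proc/self/setgroups", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "deny", 4);
 *		close(fd);
 *	}
 *	// Only now may an unprivileged process write /proc/self/gid_map.
 */
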
bool userns_may_setgroups(const struct user_namespace *ns)
{
	bool allowed;

	mutex_lock(&userns_state_mutex);
	/* It is not safe to use setgroups until a gid mapping in
	 * the user namespace has been established.
	 */
	allowed = ns->gid_map.nr_extents != 0;
	/* Is setgroups allowed? */
	allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
	mutex_unlock(&userns_state_mutex);

	return allowed;
}

/*
 * Returns true if @child is the same namespace or a descendant of
 * @ancestor.
 */
bool in_userns(const struct user_namespace *ancestor,
	       const struct user_namespace *child)
{
	const struct user_namespace *ns;
	for (ns = child; ns->level > ancestor->level; ns = ns->parent)
		;
	return (ns == ancestor);
}

bool current_in_userns(const struct user_namespace *target_ns)
{
	return in_userns(target_ns, current_user_ns());
}
EXPORT_SYMBOL(current_in_userns);

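/*
 * Typical use (a hedged sketch; obj->user_ns is a stand-in for the user
 * namespace owning some kernel object, not a name from this file):
 * refuse access unless the caller is in that namespace or one of its
 * descendants.
 *
 *	if (!current_in_userns(obj->user_ns))
 *		return -EPERM;
 */
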
static inline struct user_namespace *to_user_ns(struct ns_common *ns)
{
	return container_of(ns, struct user_namespace, ns);
}

static struct ns_common *userns_get(struct task_struct *task)
{
	struct user_namespace *user_ns;

	rcu_read_lock();
	user_ns = get_user_ns(__task_cred(task)->user_ns);
	rcu_read_unlock();

	return user_ns ? &user_ns->ns : NULL;
}

static void userns_put(struct ns_common *ns)
{
	put_user_ns(to_user_ns(ns));
}

static int userns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct user_namespace *user_ns = to_user_ns(ns);
	struct cred *cred;

	/* Don't allow gaining capabilities by reentering
	 * the same user namespace.
	 */
	if (user_ns == current_user_ns())
		return -EINVAL;

	/* Tasks that share a thread group must share a user namespace */
	if (!thread_group_empty(current))
		return -EINVAL;

	if (current->fs->users != 1)
		return -EINVAL;

	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cred = nsset_cred(nsset);
	if (!cred)
		return -EINVAL;

	put_user_ns(cred->user_ns);
	set_cred_user_ns(cred, get_user_ns(user_ns));

	return 0;
}

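/*
 * Userspace counterpart of userns_install() (illustrative only):
 * joining an existing user namespace with setns(2). Per the checks
 * above, the caller must be single-threaded, must not share fs state,
 * and needs CAP_SYS_ADMIN in the target namespace.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	static int join_userns(pid_t pid)
 *	{
 *		char path[64];
 *		int fd, ret;
 *
 *		snprintf(path, sizeof(path), "/proc/%d/ns/user", (int)pid);
 *		fd = open(path, O_RDONLY);
 *		if (fd < 0)
 *			return -1;
 *		ret = setns(fd, CLONE_NEWUSER);
 *		close(fd);
 *		return ret;
 *	}
 */
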
struct ns_common *ns_get_owner(struct ns_common *ns)
{
	struct user_namespace *my_user_ns = current_user_ns();
	struct user_namespace *owner, *p;

	/* See if the owner is in the current user namespace */
	owner = p = ns->ops->owner(ns);
	for (;;) {
		if (!p)
			return ERR_PTR(-EPERM);
		if (p == my_user_ns)
			break;
		p = p->parent;
	}

	return &get_user_ns(owner)->ns;
}

static struct user_namespace *userns_owner(struct ns_common *ns)
{
	return to_user_ns(ns)->parent;
}

const struct proc_ns_operations userns_operations = {
	.name = "user",
	.type = CLONE_NEWUSER,
	.get = userns_get,
	.put = userns_put,
	.install = userns_install,
	.owner = userns_owner,
	.get_parent = ns_get_owner,
};

static __init int user_namespaces_init(void)
{
	user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
	return 0;
}
subsys_initcall(user_namespaces_init);