kernel/user_namespace.c
1 // SPDX-License-Identifier: GPL-2.0-only
2
3 #include <linux/export.h>
4 #include <linux/nsproxy.h>
5 #include <linux/slab.h>
6 #include <linux/sched/signal.h>
7 #include <linux/user_namespace.h>
8 #include <linux/proc_ns.h>
9 #include <linux/highuid.h>
10 #include <linux/cred.h>
11 #include <linux/securebits.h>
12 #include <linux/keyctl.h>
13 #include <linux/key-type.h>
14 #include <keys/user-type.h>
15 #include <linux/seq_file.h>
16 #include <linux/fs.h>
17 #include <linux/uaccess.h>
18 #include <linux/ctype.h>
19 #include <linux/projid.h>
20 #include <linux/fs_struct.h>
21 #include <linux/bsearch.h>
22 #include <linux/sort.h>
23
24 static struct kmem_cache *user_ns_cachep __read_mostly;
25 static DEFINE_MUTEX(userns_state_mutex);
26
27 static bool new_idmap_permitted(const struct file *file,
28 struct user_namespace *ns, int cap_setid,
29 struct uid_gid_map *map);
30 static void free_user_ns(struct work_struct *work);
31
32 static struct ucounts *inc_user_namespaces(struct user_namespace *ns, kuid_t uid)
33 {
34 return inc_ucount(ns, uid, UCOUNT_USER_NAMESPACES);
35 }
36
37 static void dec_user_namespaces(struct ucounts *ucounts)
38 {
39 return dec_ucount(ucounts, UCOUNT_USER_NAMESPACES);
40 }
41
42 static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
43 {
44 /* Start with the same capabilities as init, but useless for doing
45 * anything, as the capabilities are bound to the new user namespace.
46 */
47 cred->securebits = SECUREBITS_DEFAULT;
48 cred->cap_inheritable = CAP_EMPTY_SET;
49 cred->cap_permitted = CAP_FULL_SET;
50 cred->cap_effective = CAP_FULL_SET;
51 cred->cap_ambient = CAP_EMPTY_SET;
52 cred->cap_bset = CAP_FULL_SET;
53 #ifdef CONFIG_KEYS
54 key_put(cred->request_key_auth);
55 cred->request_key_auth = NULL;
56 #endif
57 /* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
58 cred->user_ns = user_ns;
59 }
60
61 /*
62 * Create a new user namespace, deriving the creator from the user in the
63 * passed credentials, and replacing that user with the new root user for the
64 * new namespace.
65 *
66 * This is called by copy_creds(), which will finish setting the target task's
67 * credentials.
68 */
69 int create_user_ns(struct cred *new)
70 {
71 struct user_namespace *ns, *parent_ns = new->user_ns;
72 kuid_t owner = new->euid;
73 kgid_t group = new->egid;
74 struct ucounts *ucounts;
75 int ret, i;
76
77 ret = -ENOSPC;
78 if (parent_ns->level > 32)
79 goto fail;
80
81 ucounts = inc_user_namespaces(parent_ns, owner);
82 if (!ucounts)
83 goto fail;
84
85 /*
86 * Verify that we cannot violate the file-access policy that the
87 * root directory specifies, by checking that the root directory
88 * is at the root of the mount namespace, which allows all files
89 * to be accessed.
90 */
91 ret = -EPERM;
92 if (current_chrooted())
93 goto fail_dec;
94
95 /* The creator needs a mapping in the parent user namespace
96 * or else we won't be able to reasonably tell userspace who
97 * created a user_namespace.
98 */
99 ret = -EPERM;
100 if (!kuid_has_mapping(parent_ns, owner) ||
101 !kgid_has_mapping(parent_ns, group))
102 goto fail_dec;
103
104 ret = -ENOMEM;
105 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
106 if (!ns)
107 goto fail_dec;
108
109 ns->parent_could_setfcap = cap_raised(new->cap_effective, CAP_SETFCAP);
110 ret = ns_alloc_inum(&ns->ns);
111 if (ret)
112 goto fail_free;
113 ns->ns.ops = &userns_operations;
114
115 refcount_set(&ns->ns.count, 1);
116 /* Leave the new->user_ns reference with the new user namespace. */
117 ns->parent = parent_ns;
118 ns->level = parent_ns->level + 1;
119 ns->owner = owner;
120 ns->group = group;
121 INIT_WORK(&ns->work, free_user_ns);
122 for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++) {
123 ns->ucount_max[i] = INT_MAX;
124 }
125 set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC));
126 set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_MSGQUEUE, rlimit(RLIMIT_MSGQUEUE));
127 set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_SIGPENDING, rlimit(RLIMIT_SIGPENDING));
128 set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_MEMLOCK, rlimit(RLIMIT_MEMLOCK));
129 ns->ucounts = ucounts;
130
131 /* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
132 mutex_lock(&userns_state_mutex);
133 ns->flags = parent_ns->flags;
134 mutex_unlock(&userns_state_mutex);
135
136 #ifdef CONFIG_KEYS
137 INIT_LIST_HEAD(&ns->keyring_name_list);
138 init_rwsem(&ns->keyring_sem);
139 #endif
140 ret = -ENOMEM;
141 if (!setup_userns_sysctls(ns))
142 goto fail_keyring;
143
144 set_cred_user_ns(new, ns);
145 return 0;
146 fail_keyring:
147 #ifdef CONFIG_PERSISTENT_KEYRINGS
148 key_put(ns->persistent_keyring_register);
149 #endif
150 ns_free_inum(&ns->ns);
151 fail_free:
152 kmem_cache_free(user_ns_cachep, ns);
153 fail_dec:
154 dec_user_namespaces(ucounts);
155 fail:
156 return ret;
157 }
158
159 int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
160 {
161 struct cred *cred;
162 int err = -ENOMEM;
163
164 if (!(unshare_flags & CLONE_NEWUSER))
165 return 0;
166
167 cred = prepare_creds();
168 if (cred) {
169 err = create_user_ns(cred);
170 if (err)
171 put_cred(cred);
172 else
173 *new_cred = cred;
174 }
175
176 return err;
177 }
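/*
 * How this is reached (illustrative sketch, not taken from this file):
 * userspace creates a user namespace with clone(2) or unshare(2) and
 * CLONE_NEWUSER, e.g.
 *
 *	unshare(CLONE_NEWUSER);                          // -> ksys_unshare() -> unshare_userns()
 *	clone(fn, stack, CLONE_NEWUSER | SIGCHLD, arg);  // -> copy_creds() -> create_user_ns()
 *
 * or via a tool such as "unshare --user --map-root-user", which also
 * writes the uid_map/gid_map files handled further down in this file.
 */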
178
179 static void free_user_ns(struct work_struct *work)
180 {
181 struct user_namespace *parent, *ns =
182 container_of(work, struct user_namespace, work);
183
184 do {
185 struct ucounts *ucounts = ns->ucounts;
186 parent = ns->parent;
187 if (ns->gid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
188 kfree(ns->gid_map.forward);
189 kfree(ns->gid_map.reverse);
190 }
191 if (ns->uid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
192 kfree(ns->uid_map.forward);
193 kfree(ns->uid_map.reverse);
194 }
195 if (ns->projid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
196 kfree(ns->projid_map.forward);
197 kfree(ns->projid_map.reverse);
198 }
199 retire_userns_sysctls(ns);
200 key_free_user_ns(ns);
201 ns_free_inum(&ns->ns);
202 kmem_cache_free(user_ns_cachep, ns);
203 dec_user_namespaces(ucounts);
204 ns = parent;
205 } while (refcount_dec_and_test(&parent->ns.count));
206 }
207
208 void __put_user_ns(struct user_namespace *ns)
209 {
210 schedule_work(&ns->work);
211 }
212 EXPORT_SYMBOL(__put_user_ns);
213
214 /**
215 * idmap_key struct holds the information necessary to find an idmapping in a
216 * sorted idmap array. It is passed to cmp_map_id() as first argument.
217 */
218 struct idmap_key {
219 bool map_up; /* true -> id from kid; false -> kid from id */
220 u32 id; /* id to find */
221 u32 count; /* == 0 unless used with map_id_range_down() */
222 };
223
224 /**
225 * cmp_map_id - Function to be passed to bsearch() to find the requested
226 * idmapping. Expects struct idmap_key to be passed via @k.
227 */
228 static int cmp_map_id(const void *k, const void *e)
229 {
230 u32 first, last, id2;
231 const struct idmap_key *key = k;
232 const struct uid_gid_extent *el = e;
233
234 id2 = key->id + key->count - 1;
235
236 /* handle map_id_{down,up}() */
237 if (key->map_up)
238 first = el->lower_first;
239 else
240 first = el->first;
241
242 last = first + el->count - 1;
243
244 if (key->id >= first && key->id <= last &&
245 (id2 >= first && id2 <= last))
246 return 0;
247
248 if (key->id < first || id2 < first)
249 return -1;
250
251 return 1;
252 }
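/*
 * Worked example for cmp_map_id() (values assumed for illustration):
 * with an extent { .first = 1000, .lower_first = 100000, .count = 500 }
 * and a key { .map_up = false, .id = 1200, .count = 1 }, the key range
 * [1200,1200] lies inside [1000,1499], so the function returns 0 and
 * bsearch() reports a match.  An id of 600 returns -1 (search the lower
 * half of the sorted array), an id of 2000 returns 1 (search the upper
 * half).
 */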
253
254 /**
255 * map_id_range_down_max - Find idmap via binary search in ordered idmap array.
256 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
257 */
258 static struct uid_gid_extent *
259 map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
260 {
261 struct idmap_key key;
262
263 key.map_up = false;
264 key.count = count;
265 key.id = id;
266
267 return bsearch(&key, map->forward, extents,
268 sizeof(struct uid_gid_extent), cmp_map_id);
269 }
270
271 /**
272 * map_id_range_down_base - Find idmap via binary search in static extent array.
273 * Can only be called if number of mappings is equal or less than
274 * UID_GID_MAP_MAX_BASE_EXTENTS.
275 */
276 static struct uid_gid_extent *
277 map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
278 {
279 unsigned idx;
280 u32 first, last, id2;
281
282 id2 = id + count - 1;
283
284 /* Find the matching extent */
285 for (idx = 0; idx < extents; idx++) {
286 first = map->extent[idx].first;
287 last = first + map->extent[idx].count - 1;
288 if (id >= first && id <= last &&
289 (id2 >= first && id2 <= last))
290 return &map->extent[idx];
291 }
292 return NULL;
293 }
294
295 static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
296 {
297 struct uid_gid_extent *extent;
298 unsigned extents = map->nr_extents;
299 smp_rmb();
300
301 if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
302 extent = map_id_range_down_base(extents, map, id, count);
303 else
304 extent = map_id_range_down_max(extents, map, id, count);
305
306 /* Map the id or note failure */
307 if (extent)
308 id = (id - extent->first) + extent->lower_first;
309 else
310 id = (u32) -1;
311
312 return id;
313 }
314
315 static u32 map_id_down(struct uid_gid_map *map, u32 id)
316 {
317 return map_id_range_down(map, id, 1);
318 }
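/*
 * Worked example (values assumed for illustration): with a single
 * extent { .first = 0, .lower_first = 100000, .count = 65536 },
 * map_id_down(map, 1000) finds the extent and returns
 * (1000 - 0) + 100000 = 101000, while map_id_down(map, 70000) matches
 * no extent and returns (u32) -1.
 */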
319
320 /**
321 * map_id_up_base - Find idmap via binary search in static extent array.
322 * Can only be called if number of mappings is equal or less than
323 * UID_GID_MAP_MAX_BASE_EXTENTS.
324 */
325 static struct uid_gid_extent *
326 map_id_up_base(unsigned extents, struct uid_gid_map *map, u32 id)
327 {
328 unsigned idx;
329 u32 first, last;
330
331 /* Find the matching extent */
332 for (idx = 0; idx < extents; idx++) {
333 first = map->extent[idx].lower_first;
334 last = first + map->extent[idx].count - 1;
335 if (id >= first && id <= last)
336 return &map->extent[idx];
337 }
338 return NULL;
339 }
340
341 /**
342 * map_id_up_max - Find idmap via binary search in ordered idmap array.
343 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
344 */
345 static struct uid_gid_extent *
346 map_id_up_max(unsigned extents, struct uid_gid_map *map, u32 id)
347 {
348 struct idmap_key key;
349
350 key.map_up = true;
351 key.count = 1;
352 key.id = id;
353
354 return bsearch(&key, map->reverse, extents,
355 sizeof(struct uid_gid_extent), cmp_map_id);
356 }
357
358 static u32 map_id_up(struct uid_gid_map *map, u32 id)
359 {
360 struct uid_gid_extent *extent;
361 unsigned extents = map->nr_extents;
362 smp_rmb();
363
364 if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
365 extent = map_id_up_base(extents, map, id);
366 else
367 extent = map_id_up_max(extents, map, id);
368
369 /* Map the id or note failure */
370 if (extent)
371 id = (id - extent->lower_first) + extent->first;
372 else
373 id = (u32) -1;
374
375 return id;
376 }
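/*
 * The inverse direction, with the same assumed extent as above:
 * map_id_up(map, 101000) searches on lower_first, finds [100000,165535]
 * and returns (101000 - 100000) + 0 = 1000, translating the
 * kernel-global id back into the namespace-local id.
 */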
377
378 /**
379 * make_kuid - Map a user-namespace uid pair into a kuid.
380 * @ns: User namespace that the uid is in
381 * @uid: User identifier
382 *
383 * Maps a user-namespace uid pair into a kernel internal kuid,
384 * and returns that kuid.
385 *
386 * When there is no mapping defined for the user-namespace uid
387 * pair INVALID_UID is returned. Callers are expected to test
388 * for and handle INVALID_UID being returned. INVALID_UID
389 * may be tested for using uid_valid().
390 */
391 kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
392 {
393 /* Map the uid to a global kernel uid */
394 return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
395 }
396 EXPORT_SYMBOL(make_kuid);
397
398 /**
399 * from_kuid - Create a uid from a kuid user-namespace pair.
400 * @targ: The user namespace we want a uid in.
401 * @kuid: The kernel internal uid to start with.
402 *
403 * Map @kuid into the user-namespace specified by @targ and
404 * return the resulting uid.
405 *
406 * There is always a mapping into the initial user_namespace.
407 *
408 * If @kuid has no mapping in @targ (uid_t)-1 is returned.
409 */
410 uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
411 {
412 /* Map the uid from a global kernel uid */
413 return map_id_up(&targ->uid_map, __kuid_val(kuid));
414 }
415 EXPORT_SYMBOL(from_kuid);
416
417 /**
418 * from_kuid_munged - Create a uid from a kuid user-namespace pair.
419 * @targ: The user namespace we want a uid in.
420 * @kuid: The kernel internal uid to start with.
421 *
422 * Map @kuid into the user-namespace specified by @targ and
423 * return the resulting uid.
424 *
425 * There is always a mapping into the initial user_namespace.
426 *
427 * Unlike from_kuid, from_kuid_munged never fails and always
428 * returns a valid uid. This makes from_kuid_munged appropriate
429 * for use in syscalls like stat and getuid, where failing the
430 * system call and failing to provide a valid uid are not
431 * options.
432 *
433 * If @kuid has no mapping in @targ overflowuid is returned.
434 */
435 uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
436 {
437 uid_t uid;
438 uid = from_kuid(targ, kuid);
439
440 if (uid == (uid_t) -1)
441 uid = overflowuid;
442 return uid;
443 }
444 EXPORT_SYMBOL(from_kuid_munged);
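/*
 * Typical caller pattern (illustrative sketch): code reporting ownership
 * to userspace, such as stat(2), does roughly
 *
 *	uid_t uid = from_kuid_munged(current_user_ns(), inode->i_uid);
 *
 * so that a kuid with no mapping in the caller's namespace shows up as
 * the overflow uid (65534 by default) rather than failing the call.
 */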
445
446 /**
447 * make_kgid - Map a user-namespace gid pair into a kgid.
448 * @ns: User namespace that the gid is in
449 * @gid: group identifier
450 *
451 * Maps a user-namespace gid pair into a kernel internal kgid,
452 * and returns that kgid.
453 *
454 * When there is no mapping defined for the user-namespace gid
455 * pair INVALID_GID is returned. Callers are expected to test
456 * for and handle INVALID_GID being returned. INVALID_GID may be
457 * tested for using gid_valid().
458 */
459 kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
460 {
461 /* Map the gid to a global kernel gid */
462 return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
463 }
464 EXPORT_SYMBOL(make_kgid);
465
466 /**
467 * from_kgid - Create a gid from a kgid user-namespace pair.
468 * @targ: The user namespace we want a gid in.
469 * @kgid: The kernel internal gid to start with.
470 *
471 * Map @kgid into the user-namespace specified by @targ and
472 * return the resulting gid.
473 *
474 * There is always a mapping into the initial user_namespace.
475 *
476 * If @kgid has no mapping in @targ (gid_t)-1 is returned.
477 */
478 gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
479 {
480 /* Map the gid from a global kernel gid */
481 return map_id_up(&targ->gid_map, __kgid_val(kgid));
482 }
483 EXPORT_SYMBOL(from_kgid);
484
485 /**
486 * from_kgid_munged - Create a gid from a kgid user-namespace pair.
487 * @targ: The user namespace we want a gid in.
488 * @kgid: The kernel internal gid to start with.
489 *
490 * Map @kgid into the user-namespace specified by @targ and
491 * return the resulting gid.
492 *
493 * There is always a mapping into the initial user_namespace.
494 *
495 * Unlike from_kgid, from_kgid_munged never fails and always
496 * returns a valid gid. This makes from_kgid_munged appropriate
497 * for use in syscalls like stat and getgid where failing the
498 * system call and failing to provide a valid gid are not options.
499 *
500 * If @kgid has no mapping in @targ overflowgid is returned.
501 */
502 gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
503 {
504 gid_t gid;
505 gid = from_kgid(targ, kgid);
506
507 if (gid == (gid_t) -1)
508 gid = overflowgid;
509 return gid;
510 }
511 EXPORT_SYMBOL(from_kgid_munged);
512
513 /**
514 * make_kprojid - Map a user-namespace projid pair into a kprojid.
515 * @ns: User namespace that the projid is in
516 * @projid: Project identifier
517 *
518 * Maps a user-namespace projid pair into a kernel internal kprojid,
519 * and returns that kprojid.
520 *
521 * When there is no mapping defined for the user-namespace projid
522 * pair INVALID_PROJID is returned. Callers are expected to test
523 * for and handle INVALID_PROJID being returned. INVALID_PROJID
524 * may be tested for using projid_valid().
525 */
526 kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
527 {
528 /* Map the projid to a global kernel projid */
529 return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
530 }
531 EXPORT_SYMBOL(make_kprojid);
532
533 /**
534 * from_kprojid - Create a projid from a kprojid user-namespace pair.
535 * @targ: The user namespace we want a projid in.
536 * @kprojid: The kernel internal project identifier to start with.
537 *
538 * Map @kprojid into the user-namespace specified by @targ and
539 * return the resulting projid.
540 *
541 * There is always a mapping into the initial user_namespace.
542 *
543 * If @kprojid has no mapping in @targ (projid_t)-1 is returned.
544 */
545 projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
546 {
547 /* Map the projid from a global kernel projid */
548 return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
549 }
550 EXPORT_SYMBOL(from_kprojid);
551
552 /**
553 * from_kprojid_munged - Create a projid from a kprojid user-namespace pair.
554 * @targ: The user namespace we want a projid in.
555 * @kprojid: The kernel internal projid to start with.
556 *
557 * Map @kprojid into the user-namespace specified by @targ and
558 * return the resulting projid.
559 *
560 * There is always a mapping into the initial user_namespace.
561 *
562 * Unlike from_kprojid, from_kprojid_munged never fails and always
563 * returns a valid projid. This makes from_kprojid_munged
564 * appropriate for use in syscalls like stat, where failing the
565 * system call and failing to provide a valid projid are not
566 * options.
567 *
568 * If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
569 */
570 projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
571 {
572 projid_t projid;
573 projid = from_kprojid(targ, kprojid);
574
575 if (projid == (projid_t) -1)
576 projid = OVERFLOW_PROJID;
577 return projid;
578 }
579 EXPORT_SYMBOL(from_kprojid_munged);
580
581
582 static int uid_m_show(struct seq_file *seq, void *v)
583 {
584 struct user_namespace *ns = seq->private;
585 struct uid_gid_extent *extent = v;
586 struct user_namespace *lower_ns;
587 uid_t lower;
588
589 lower_ns = seq_user_ns(seq);
590 if ((lower_ns == ns) && lower_ns->parent)
591 lower_ns = lower_ns->parent;
592
593 lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));
594
595 seq_printf(seq, "%10u %10u %10u\n",
596 extent->first,
597 lower,
598 extent->count);
599
600 return 0;
601 }
602
603 static int gid_m_show(struct seq_file *seq, void *v)
604 {
605 struct user_namespace *ns = seq->private;
606 struct uid_gid_extent *extent = v;
607 struct user_namespace *lower_ns;
608 gid_t lower;
609
610 lower_ns = seq_user_ns(seq);
611 if ((lower_ns == ns) && lower_ns->parent)
612 lower_ns = lower_ns->parent;
613
614 lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));
615
616 seq_printf(seq, "%10u %10u %10u\n",
617 extent->first,
618 lower,
619 extent->count);
620
621 return 0;
622 }
623
624 static int projid_m_show(struct seq_file *seq, void *v)
625 {
626 struct user_namespace *ns = seq->private;
627 struct uid_gid_extent *extent = v;
628 struct user_namespace *lower_ns;
629 projid_t lower;
630
631 lower_ns = seq_user_ns(seq);
632 if ((lower_ns == ns) && lower_ns->parent)
633 lower_ns = lower_ns->parent;
634
635 lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));
636
637 seq_printf(seq, "%10u %10u %10u\n",
638 extent->first,
639 lower,
640 extent->count);
641
642 return 0;
643 }
644
645 static void *m_start(struct seq_file *seq, loff_t *ppos,
646 struct uid_gid_map *map)
647 {
648 loff_t pos = *ppos;
649 unsigned extents = map->nr_extents;
650 smp_rmb();
651
652 if (pos >= extents)
653 return NULL;
654
655 if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
656 return &map->extent[pos];
657
658 return &map->forward[pos];
659 }
660
661 static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
662 {
663 struct user_namespace *ns = seq->private;
664
665 return m_start(seq, ppos, &ns->uid_map);
666 }
667
668 static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
669 {
670 struct user_namespace *ns = seq->private;
671
672 return m_start(seq, ppos, &ns->gid_map);
673 }
674
675 static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
676 {
677 struct user_namespace *ns = seq->private;
678
679 return m_start(seq, ppos, &ns->projid_map);
680 }
681
682 static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
683 {
684 (*pos)++;
685 return seq->op->start(seq, pos);
686 }
687
688 static void m_stop(struct seq_file *seq, void *v)
689 {
690 return;
691 }
692
693 const struct seq_operations proc_uid_seq_operations = {
694 .start = uid_m_start,
695 .stop = m_stop,
696 .next = m_next,
697 .show = uid_m_show,
698 };
699
700 const struct seq_operations proc_gid_seq_operations = {
701 .start = gid_m_start,
702 .stop = m_stop,
703 .next = m_next,
704 .show = gid_m_show,
705 };
706
707 const struct seq_operations proc_projid_seq_operations = {
708 .start = projid_m_start,
709 .stop = m_stop,
710 .next = m_next,
711 .show = projid_m_show,
712 };
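/*
 * Reading /proc/<pid>/uid_map (or gid_map, projid_map) through these
 * seq_operations yields one line per extent in "first lower count"
 * form, each field right-aligned in a 10-character column, e.g.
 * (illustrative):
 *
 *	         0     100000      65536
 */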
713
714 static bool mappings_overlap(struct uid_gid_map *new_map,
715 struct uid_gid_extent *extent)
716 {
717 u32 upper_first, lower_first, upper_last, lower_last;
718 unsigned idx;
719
720 upper_first = extent->first;
721 lower_first = extent->lower_first;
722 upper_last = upper_first + extent->count - 1;
723 lower_last = lower_first + extent->count - 1;
724
725 for (idx = 0; idx < new_map->nr_extents; idx++) {
726 u32 prev_upper_first, prev_lower_first;
727 u32 prev_upper_last, prev_lower_last;
728 struct uid_gid_extent *prev;
729
730 if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
731 prev = &new_map->extent[idx];
732 else
733 prev = &new_map->forward[idx];
734
735 prev_upper_first = prev->first;
736 prev_lower_first = prev->lower_first;
737 prev_upper_last = prev_upper_first + prev->count - 1;
738 prev_lower_last = prev_lower_first + prev->count - 1;
739
740 /* Does the upper range intersect a previous extent? */
741 if ((prev_upper_first <= upper_last) &&
742 (prev_upper_last >= upper_first))
743 return true;
744
745 /* Does the lower range intersect a previous extent? */
746 if ((prev_lower_first <= lower_last) &&
747 (prev_lower_last >= lower_first))
748 return true;
749 }
750 return false;
751 }
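/*
 * Example of a rejected write (values assumed): if new_map already holds
 * the extent "0 100000 1000" (upper range [0,999], lower [100000,100999]),
 * a further line "500 200000 10" overlaps it on the upper range
 * ([500,509] intersects [0,999]), so mappings_overlap() returns true and
 * map_write() fails with -EINVAL.
 */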
752
753 /**
754 * insert_extent - Safely insert a new idmap extent into struct uid_gid_map.
755 * Takes care to allocate a 4K block of memory if the number of mappings exceeds
756 * UID_GID_MAP_MAX_BASE_EXTENTS.
757 */
758 static int insert_extent(struct uid_gid_map *map, struct uid_gid_extent *extent)
759 {
760 struct uid_gid_extent *dest;
761
762 if (map->nr_extents == UID_GID_MAP_MAX_BASE_EXTENTS) {
763 struct uid_gid_extent *forward;
764
765 /* Allocate memory for 340 mappings. */
766 forward = kmalloc_array(UID_GID_MAP_MAX_EXTENTS,
767 sizeof(struct uid_gid_extent),
768 GFP_KERNEL);
769 if (!forward)
770 return -ENOMEM;
771
772 /* Copy over memory. Only set up memory for the forward pointer.
773 * Defer the memory setup for the reverse pointer.
774 */
775 memcpy(forward, map->extent,
776 map->nr_extents * sizeof(map->extent[0]));
777
778 map->forward = forward;
779 map->reverse = NULL;
780 }
781
782 if (map->nr_extents < UID_GID_MAP_MAX_BASE_EXTENTS)
783 dest = &map->extent[map->nr_extents];
784 else
785 dest = &map->forward[map->nr_extents];
786
787 *dest = *extent;
788 map->nr_extents++;
789 return 0;
790 }
791
792 /* cmp function to sort() forward mappings */
793 static int cmp_extents_forward(const void *a, const void *b)
794 {
795 const struct uid_gid_extent *e1 = a;
796 const struct uid_gid_extent *e2 = b;
797
798 if (e1->first < e2->first)
799 return -1;
800
801 if (e1->first > e2->first)
802 return 1;
803
804 return 0;
805 }
806
807 /* cmp function to sort() reverse mappings */
808 static int cmp_extents_reverse(const void *a, const void *b)
809 {
810 const struct uid_gid_extent *e1 = a;
811 const struct uid_gid_extent *e2 = b;
812
813 if (e1->lower_first < e2->lower_first)
814 return -1;
815
816 if (e1->lower_first > e2->lower_first)
817 return 1;
818
819 return 0;
820 }
821
822 /**
823 * sort_idmaps - Sorts an array of idmap entries.
824 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
825 */
826 static int sort_idmaps(struct uid_gid_map *map)
827 {
828 if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
829 return 0;
830
831 /* Sort forward array. */
832 sort(map->forward, map->nr_extents, sizeof(struct uid_gid_extent),
833 cmp_extents_forward, NULL);
834
835 /* Only copy the memory from forward we actually need. */
836 map->reverse = kmemdup(map->forward,
837 map->nr_extents * sizeof(struct uid_gid_extent),
838 GFP_KERNEL);
839 if (!map->reverse)
840 return -ENOMEM;
841
842 /* Sort reverse array. */
843 sort(map->reverse, map->nr_extents, sizeof(struct uid_gid_extent),
844 cmp_extents_reverse, NULL);
845
846 return 0;
847 }
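/*
 * Ordering example (illustrative; in practice this path only runs once
 * more than UID_GID_MAP_MAX_BASE_EXTENTS extents exist): for the two
 * extents "0 200000 100" and "5000 100000 10", the forward copy is
 * sorted by .first (0 before 5000) while the reverse copy is sorted by
 * .lower_first (100000 before 200000), so the extents appear in opposite
 * order in the two arrays and both lookup directions can use bsearch().
 */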
848
849 /**
850 * verify_root_map() - check the uid 0 mapping
851 * @file: idmapping file
852 * @map_ns: user namespace of the target process
853 * @new_map: requested idmap
854 *
855 * If a process requests mapping parent uid 0 into the new ns, verify that the
856 * process writing the map had the CAP_SETFCAP capability as the target process
857 * will be able to write fscaps that are valid in ancestor user namespaces.
858 *
859 * Return: true if the mapping is allowed, false if not.
860 */
861 static bool verify_root_map(const struct file *file,
862 struct user_namespace *map_ns,
863 struct uid_gid_map *new_map)
864 {
865 int idx;
866 const struct user_namespace *file_ns = file->f_cred->user_ns;
867 struct uid_gid_extent *extent0 = NULL;
868
869 for (idx = 0; idx < new_map->nr_extents; idx++) {
870 if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
871 extent0 = &new_map->extent[idx];
872 else
873 extent0 = &new_map->forward[idx];
874 if (extent0->lower_first == 0)
875 break;
876
877 extent0 = NULL;
878 }
879
880 if (!extent0)
881 return true;
882
883 if (map_ns == file_ns) {
884 /* The process unshared its ns and is writing to its own
885 * /proc/self/uid_map. User already has full capabilities in
886 * the new namespace. Verify that the parent had CAP_SETFCAP
887 * when it unshared.
888 */
889 if (!file_ns->parent_could_setfcap)
890 return false;
891 } else {
892 /* Process p1 is writing to the uid_map of p2, which is in a child
893 * user namespace of p1's. Verify that the opener of the map
894 * file has CAP_SETFCAP against the parent of the new map
895 * namespace. */
896 if (!file_ns_capable(file, map_ns->parent, CAP_SETFCAP))
897 return false;
898 }
899
900 return true;
901 }
902
903 static ssize_t map_write(struct file *file, const char __user *buf,
904 size_t count, loff_t *ppos,
905 int cap_setid,
906 struct uid_gid_map *map,
907 struct uid_gid_map *parent_map)
908 {
909 struct seq_file *seq = file->private_data;
910 struct user_namespace *map_ns = seq->private;
911 struct uid_gid_map new_map;
912 unsigned idx;
913 struct uid_gid_extent extent;
914 char *kbuf = NULL, *pos, *next_line;
915 ssize_t ret;
916
917 /* Only allow < page size writes at the beginning of the file */
918 if ((*ppos != 0) || (count >= PAGE_SIZE))
919 return -EINVAL;
920
921 /* Slurp in the user data */
922 kbuf = memdup_user_nul(buf, count);
923 if (IS_ERR(kbuf))
924 return PTR_ERR(kbuf);
925
926 /*
927 * The userns_state_mutex serializes all writes to any given map.
928 *
929 * Any map is only ever written once.
930 *
931 * An id map fits within 1 cache line on most architectures.
932 *
933 * On read nothing needs to be done unless you are on an
934 * architecture with a crazy cache coherency model like alpha.
935 *
936 * There is a one time data dependency between reading the
937 * count of the extents and the values of the extents. The
938 * desired behavior is to see the values of the extents that
939 * were written before the count of the extents.
940 *
941 * To achieve this, smp_wmb() is used to guarantee the write
942 * order and smp_rmb() ensures that we don't read stale data on
943 * architectures with weaker memory ordering.
944 */
945 mutex_lock(&userns_state_mutex);
946
947 memset(&new_map, 0, sizeof(struct uid_gid_map));
948
949 ret = -EPERM;
950 /* Only allow one successful write to the map */
951 if (map->nr_extents != 0)
952 goto out;
953
954 /*
955 * Adjusting namespace settings requires capabilities on the target.
956 */
957 if (cap_valid(cap_setid) && !file_ns_capable(file, map_ns, CAP_SYS_ADMIN))
958 goto out;
959
960 /* Parse the user data */
961 ret = -EINVAL;
962 pos = kbuf;
963 for (; pos; pos = next_line) {
964
965 /* Find the end of line and ensure I don't look past it */
966 next_line = strchr(pos, '\n');
967 if (next_line) {
968 *next_line = '\0';
969 next_line++;
970 if (*next_line == '\0')
971 next_line = NULL;
972 }
973
974 pos = skip_spaces(pos);
975 extent.first = simple_strtoul(pos, &pos, 10);
976 if (!isspace(*pos))
977 goto out;
978
979 pos = skip_spaces(pos);
980 extent.lower_first = simple_strtoul(pos, &pos, 10);
981 if (!isspace(*pos))
982 goto out;
983
984 pos = skip_spaces(pos);
985 extent.count = simple_strtoul(pos, &pos, 10);
986 if (*pos && !isspace(*pos))
987 goto out;
988
989 /* Verify there is no trailing junk on the line */
990 pos = skip_spaces(pos);
991 if (*pos != '\0')
992 goto out;
993
994 /* Verify we have been given valid starting values */
995 if ((extent.first == (u32) -1) ||
996 (extent.lower_first == (u32) -1))
997 goto out;
998
999 /* Verify count is not zero and does not cause the
1000 * extent to wrap
1001 */
1002 if ((extent.first + extent.count) <= extent.first)
1003 goto out;
1004 if ((extent.lower_first + extent.count) <=
1005 extent.lower_first)
1006 goto out;
1007
1008 /* Do the ranges in extent overlap any previous extents? */
1009 if (mappings_overlap(&new_map, &extent))
1010 goto out;
1011
1012 if ((new_map.nr_extents + 1) == UID_GID_MAP_MAX_EXTENTS &&
1013 (next_line != NULL))
1014 goto out;
1015
1016 ret = insert_extent(&new_map, &extent);
1017 if (ret < 0)
1018 goto out;
1019 ret = -EINVAL;
1020 }
1021 /* Be very certain the new map actually exists */
1022 if (new_map.nr_extents == 0)
1023 goto out;
1024
1025 ret = -EPERM;
1026 /* Validate the user is allowed to use the user ids being mapped to. */
1027 if (!new_idmap_permitted(file, map_ns, cap_setid, &new_map))
1028 goto out;
1029
1030 ret = -EPERM;
1031 /* Map the lower ids from the parent user namespace to the
1032 * kernel global id space.
1033 */
1034 for (idx = 0; idx < new_map.nr_extents; idx++) {
1035 struct uid_gid_extent *e;
1036 u32 lower_first;
1037
1038 if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
1039 e = &new_map.extent[idx];
1040 else
1041 e = &new_map.forward[idx];
1042
1043 lower_first = map_id_range_down(parent_map,
1044 e->lower_first,
1045 e->count);
1046
1047 /* Fail if we can not map the specified extent to
1048 * the kernel global id space.
1049 */
1050 if (lower_first == (u32) -1)
1051 goto out;
1052
1053 e->lower_first = lower_first;
1054 }
1055
1056 /*
1057 * If we want to use binary search for lookup, this clones the extent
1058 * array and sorts both copies.
1059 */
1060 ret = sort_idmaps(&new_map);
1061 if (ret < 0)
1062 goto out;
1063
1064 /* Install the map */
1065 if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
1066 memcpy(map->extent, new_map.extent,
1067 new_map.nr_extents * sizeof(new_map.extent[0]));
1068 } else {
1069 map->forward = new_map.forward;
1070 map->reverse = new_map.reverse;
1071 }
1072 smp_wmb();
1073 map->nr_extents = new_map.nr_extents;
1074
1075 *ppos = count;
1076 ret = count;
1077 out:
1078 if (ret < 0 && new_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
1079 kfree(new_map.forward);
1080 kfree(new_map.reverse);
1081 map->forward = NULL;
1082 map->reverse = NULL;
1083 map->nr_extents = 0;
1084 }
1085
1086 mutex_unlock(&userns_state_mutex);
1087 kfree(kbuf);
1088 return ret;
1089 }
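/*
 * Accepted input format (illustrative): a single write of, e.g.,
 *
 *	0 100000 65536
 *	65536 165536 1000
 *
 * to /proc/<pid>/uid_map maps uids 0-65535 inside the namespace onto
 * uids 100000-165535 of the parent namespace and 65536-66535 onto
 * 165536-166535.  The whole map must arrive in one write at offset 0,
 * smaller than a page, and only one successful write is ever allowed.
 */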
1090
1091 ssize_t proc_uid_map_write(struct file *file, const char __user *buf,
1092 size_t size, loff_t *ppos)
1093 {
1094 struct seq_file *seq = file->private_data;
1095 struct user_namespace *ns = seq->private;
1096 struct user_namespace *seq_ns = seq_user_ns(seq);
1097
1098 if (!ns->parent)
1099 return -EPERM;
1100
1101 if ((seq_ns != ns) && (seq_ns != ns->parent))
1102 return -EPERM;
1103
1104 return map_write(file, buf, size, ppos, CAP_SETUID,
1105 &ns->uid_map, &ns->parent->uid_map);
1106 }
1107
1108 ssize_t proc_gid_map_write(struct file *file, const char __user *buf,
1109 size_t size, loff_t *ppos)
1110 {
1111 struct seq_file *seq = file->private_data;
1112 struct user_namespace *ns = seq->private;
1113 struct user_namespace *seq_ns = seq_user_ns(seq);
1114
1115 if (!ns->parent)
1116 return -EPERM;
1117
1118 if ((seq_ns != ns) && (seq_ns != ns->parent))
1119 return -EPERM;
1120
1121 return map_write(file, buf, size, ppos, CAP_SETGID,
1122 &ns->gid_map, &ns->parent->gid_map);
1123 }
1124
1125 ssize_t proc_projid_map_write(struct file *file, const char __user *buf,
1126 size_t size, loff_t *ppos)
1127 {
1128 struct seq_file *seq = file->private_data;
1129 struct user_namespace *ns = seq->private;
1130 struct user_namespace *seq_ns = seq_user_ns(seq);
1131
1132 if (!ns->parent)
1133 return -EPERM;
1134
1135 if ((seq_ns != ns) && (seq_ns != ns->parent))
1136 return -EPERM;
1137
1138 /* Anyone can set any valid project id, no capability needed */
1139 return map_write(file, buf, size, ppos, -1,
1140 &ns->projid_map, &ns->parent->projid_map);
1141 }
1142
1143 static bool new_idmap_permitted(const struct file *file,
1144 struct user_namespace *ns, int cap_setid,
1145 struct uid_gid_map *new_map)
1146 {
1147 const struct cred *cred = file->f_cred;
1148
1149 if (cap_setid == CAP_SETUID && !verify_root_map(file, ns, new_map))
1150 return false;
1151
1152 /* Don't allow mappings that would allow anything that wouldn't
1153 * be allowed without the establishment of unprivileged mappings.
1154 */
1155 if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
1156 uid_eq(ns->owner, cred->euid)) {
1157 u32 id = new_map->extent[0].lower_first;
1158 if (cap_setid == CAP_SETUID) {
1159 kuid_t uid = make_kuid(ns->parent, id);
1160 if (uid_eq(uid, cred->euid))
1161 return true;
1162 } else if (cap_setid == CAP_SETGID) {
1163 kgid_t gid = make_kgid(ns->parent, id);
1164 if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
1165 gid_eq(gid, cred->egid))
1166 return true;
1167 }
1168 }
1169
1170 /* Allow anyone to set a mapping that doesn't require privilege */
1171 if (!cap_valid(cap_setid))
1172 return true;
1173
1174 /* Allow the specified ids if we have the appropriate capability
1175 * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
1176 * And the opener of the id file also has the appropriate capability.
1177 */
1178 if (ns_capable(ns->parent, cap_setid) &&
1179 file_ns_capable(file, ns->parent, cap_setid))
1180 return true;
1181
1182 return false;
1183 }
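/*
 * Practical consequence of the rules above (illustrative, assuming the
 * writer's euid and egid are both 1000 and it created the namespace):
 * the single line "0 1000 1" may be written to uid_map without any
 * capability, mapping the creator to root inside the namespace; the same
 * line may be written to gid_map only after "deny" has been written to
 * /proc/<pid>/setgroups, per the USERNS_SETGROUPS_ALLOWED check above.
 */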
1184
1185 int proc_setgroups_show(struct seq_file *seq, void *v)
1186 {
1187 struct user_namespace *ns = seq->private;
1188 unsigned long userns_flags = READ_ONCE(ns->flags);
1189
1190 seq_printf(seq, "%s\n",
1191 (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
1192 "allow" : "deny");
1193 return 0;
1194 }
1195
1196 ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
1197 size_t count, loff_t *ppos)
1198 {
1199 struct seq_file *seq = file->private_data;
1200 struct user_namespace *ns = seq->private;
1201 char kbuf[8], *pos;
1202 bool setgroups_allowed;
1203 ssize_t ret;
1204
1205 /* Only allow a very narrow range of strings to be written */
1206 ret = -EINVAL;
1207 if ((*ppos != 0) || (count >= sizeof(kbuf)))
1208 goto out;
1209
1210 /* What was written? */
1211 ret = -EFAULT;
1212 if (copy_from_user(kbuf, buf, count))
1213 goto out;
1214 kbuf[count] = '\0';
1215 pos = kbuf;
1216
1217 /* What is being requested? */
1218 ret = -EINVAL;
1219 if (strncmp(pos, "allow", 5) == 0) {
1220 pos += 5;
1221 setgroups_allowed = true;
1222 }
1223 else if (strncmp(pos, "deny", 4) == 0) {
1224 pos += 4;
1225 setgroups_allowed = false;
1226 }
1227 else
1228 goto out;
1229
1230 /* Verify there is no trailing junk on the line */
1231 pos = skip_spaces(pos);
1232 if (*pos != '\0')
1233 goto out;
1234
1235 ret = -EPERM;
1236 mutex_lock(&userns_state_mutex);
1237 if (setgroups_allowed) {
1238 /* Enabling setgroups after setgroups has been disabled
1239 * is not allowed.
1240 */
1241 if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
1242 goto out_unlock;
1243 } else {
1244 /* Permanently disabling setgroups after setgroups has
1245 * been enabled by writing the gid_map is not allowed.
1246 */
1247 if (ns->gid_map.nr_extents != 0)
1248 goto out_unlock;
1249 ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
1250 }
1251 mutex_unlock(&userns_state_mutex);
1252
1253 /* Report a successful write */
1254 *ppos = count;
1255 ret = count;
1256 out:
1257 return ret;
1258 out_unlock:
1259 mutex_unlock(&userns_state_mutex);
1260 goto out;
1261 }
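/*
 * Ordering note (illustrative): "deny" must be written before gid_map is
 * installed, since disabling setgroups after a gid mapping exists is
 * rejected above; and once denied it cannot be re-enabled, since "allow"
 * is only accepted while USERNS_SETGROUPS_ALLOWED is still set.
 */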
1262
1263 bool userns_may_setgroups(const struct user_namespace *ns)
1264 {
1265 bool allowed;
1266
1267 mutex_lock(&userns_state_mutex);
1268 /* It is not safe to use setgroups until a gid mapping in
1269 * the user namespace has been established.
1270 */
1271 allowed = ns->gid_map.nr_extents != 0;
1272 /* Is setgroups allowed? */
1273 allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
1274 mutex_unlock(&userns_state_mutex);
1275
1276 return allowed;
1277 }
1278
1279 /*
1280 * Returns true if @child is the same namespace or a descendant of
1281 * @ancestor.
1282 */
1283 bool in_userns(const struct user_namespace *ancestor,
1284 const struct user_namespace *child)
1285 {
1286 const struct user_namespace *ns;
1287 for (ns = child; ns->level > ancestor->level; ns = ns->parent)
1288 ;
1289 return (ns == ancestor);
1290 }
1291
1292 bool current_in_userns(const struct user_namespace *target_ns)
1293 {
1294 return in_userns(target_ns, current_user_ns());
1295 }
1296 EXPORT_SYMBOL(current_in_userns);
1297
1298 static inline struct user_namespace *to_user_ns(struct ns_common *ns)
1299 {
1300 return container_of(ns, struct user_namespace, ns);
1301 }
1302
1303 static struct ns_common *userns_get(struct task_struct *task)
1304 {
1305 struct user_namespace *user_ns;
1306
1307 rcu_read_lock();
1308 user_ns = get_user_ns(__task_cred(task)->user_ns);
1309 rcu_read_unlock();
1310
1311 return user_ns ? &user_ns->ns : NULL;
1312 }
1313
1314 static void userns_put(struct ns_common *ns)
1315 {
1316 put_user_ns(to_user_ns(ns));
1317 }
1318
1319 static int userns_install(struct nsset *nsset, struct ns_common *ns)
1320 {
1321 struct user_namespace *user_ns = to_user_ns(ns);
1322 struct cred *cred;
1323
1324 /* Don't allow gaining capabilities by reentering
1325 * the same user namespace.
1326 */
1327 if (user_ns == current_user_ns())
1328 return -EINVAL;
1329
1330 /* Tasks that share a thread group must share a user namespace */
1331 if (!thread_group_empty(current))
1332 return -EINVAL;
1333
1334 if (current->fs->users != 1)
1335 return -EINVAL;
1336
1337 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
1338 return -EPERM;
1339
1340 cred = nsset_cred(nsset);
1341 if (!cred)
1342 return -EINVAL;
1343
1344 put_user_ns(cred->user_ns);
1345 set_cred_user_ns(cred, get_user_ns(user_ns));
1346
1347 if (set_cred_ucounts(cred) < 0)
1348 return -EINVAL;
1349
1350 return 0;
1351 }
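/*
 * Userspace view (illustrative): setns(fd, CLONE_NEWUSER) on an fd opened
 * from /proc/<pid>/ns/user ends up here via the nsset machinery; it fails
 * with -EINVAL for multi-threaded callers or a shared fs_struct, and with
 * -EPERM unless the caller has CAP_SYS_ADMIN in the target namespace.
 */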
1352
1353 struct ns_common *ns_get_owner(struct ns_common *ns)
1354 {
1355 struct user_namespace *my_user_ns = current_user_ns();
1356 struct user_namespace *owner, *p;
1357
1358 /* See if the owner is in the current user namespace */
1359 owner = p = ns->ops->owner(ns);
1360 for (;;) {
1361 if (!p)
1362 return ERR_PTR(-EPERM);
1363 if (p == my_user_ns)
1364 break;
1365 p = p->parent;
1366 }
1367
1368 return &get_user_ns(owner)->ns;
1369 }
1370
1371 static struct user_namespace *userns_owner(struct ns_common *ns)
1372 {
1373 return to_user_ns(ns)->parent;
1374 }
1375
1376 const struct proc_ns_operations userns_operations = {
1377 .name = "user",
1378 .type = CLONE_NEWUSER,
1379 .get = userns_get,
1380 .put = userns_put,
1381 .install = userns_install,
1382 .owner = userns_owner,
1383 .get_parent = ns_get_owner,
1384 };
1385
1386 static __init int user_namespaces_init(void)
1387 {
1388 user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC | SLAB_ACCOUNT);
1389 return 0;
1390 }
1391 subsys_initcall(user_namespaces_init);