/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
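/*
 * Each of the uid/gid/projid maps below holds a single identity extent
 * starting at 0 with a count of 4294967295 (every valid 32-bit ID), so
 * IDs in the initial namespace map straight through to themselves.
 */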
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.gid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.projid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.count = ATOMIC_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
#ifdef CONFIG_PERSISTENT_KEYRINGS
	.persistent_keyring_register_sem =
		__RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
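/*
 * Example: with CONFIG_BASE_SMALL unset, UIDHASH_BITS is 7 and
 * UIDHASH_MASK is 127, so a uid of 1000 hashes to
 * ((1000 >> 7) + 1000) & 127 == 1007 & 127 == bucket 111.
 */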

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.uid		= GLOBAL_ROOT_UID,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

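/*
 * A typical caller pairs find_user() above with free_uid() below: the
 * lookup takes a reference that the caller must drop.  Illustrative
 * sketch only ("some_uid" is a placeholder, not a symbol defined here):
 *
 *	struct user_struct *u = find_user(some_uid);
 *	if (u) {
 *		... inspect per-user counters such as u->processes ...
 *		free_uid(u);
 *	}
 *
 * Note that free_uid() uses atomic_dec_and_lock(), so dropping a
 * non-final reference never takes uidhash_lock; the lock (with IRQs
 * disabled) is only taken when the count reaches zero and free_user()
 * must unhash and free the structure.
 */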
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

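/*
 * Look up the user_struct for the given uid, allocating, initialising and
 * hashing a new one if none exists yet.  Returns a referenced user_struct,
 * or NULL if the allocation failed.
 */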
struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:
	return NULL;
}

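/*
 * Boot-time setup: create the uid_cache slab, initialise the hash table
 * and pre-insert root_user so the root UID is present from the start.
 */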
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for(n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);