/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
#include "cred-internals.h"

struct user_namespace init_user_ns = {
	.kref = {
		.refcount = ATOMIC_INIT(1),
	},
	.creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_MASK (UIDHASH_SZ - 1)
#define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid) ((ns)->uidhash_table + __uidhashfn((uid)))
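
/*
 * Illustrative only: with the usual UIDHASH_BITS of 7 (so UIDHASH_SZ == 128,
 * assuming a !CONFIG_BASE_SMALL build), uid 1000 hashes to bucket
 * ((1000 >> 7) + 1000) & 127 == 111.
 */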

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
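
/*
 * Hence the pattern used below (see find_user() and free_uid()): the lock is
 * taken with the IRQ-disabling variants, never spin_lock_bh(), e.g.
 *
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	... hash lookup / insert / remove ...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 */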

/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
	.__count = ATOMIC_INIT(2),
	.processes = ATOMIC_INIT(1),
	.files = ATOMIC_INIT(0),
	.sigpending = ATOMIC_INIT(0),
	.locked_shm = 0,
	.user_ns = &init_user_ns,
#ifdef CONFIG_USER_SCHED
	.tg = &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

#else /* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }

#endif /* CONFIG_USER_SCHED */

#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	sscanf(buf, "%lu", &shares);

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
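
/*
 * Illustrative usage (not from this file): each uid directory exposes the
 * CFS weight of that user's task group; assuming the usual default of 1024,
 *
 *	# cat /sys/kernel/uids/1000/cpu_share
 *	1024
 *	# echo 2048 > /sys/kernel/uids/1000/cpu_share
 *
 * would give uid 1000 roughly twice the default share of CPU time relative
 * to other uids under CONFIG_USER_SCHED.
 */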
#endif

#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_runtime;
	int rc;

	sscanf(buf, "%ld", &rt_runtime);

	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_period;
	int rc;

	sscanf(buf, "%lu", &rt_period);

	rc = sched_group_set_rt_period(up->tg, rt_period);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
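
/*
 * Illustrative usage (not from this file): cpu_rt_runtime and cpu_rt_period
 * together bound the realtime bandwidth of a uid's task group. Assuming the
 * values are in microseconds (as with the global sched_rt_* sysctls),
 *
 *	# echo 1000000 > /sys/kernel/uids/1000/cpu_rt_period
 *	# echo 950000  > /sys/kernel/uids/1000/cpu_rt_runtime
 *
 * would let uid 1000's realtime tasks consume at most 0.95s of CPU in each
 * 1s period.
 */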
#endif

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	&cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	&cpu_rt_runtime_attr.attr,
	&cpu_rt_period_attr.attr,
#endif
	NULL
};

/* the lifetime of user_struct is not managed by the kobject core (for now) */
static void uids_release(struct kobject *kobj)
{
	return;
}

static struct kobj_type uids_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = uids_attributes,
	.release = uids_release,
};

/*
 * Create /sys/kernel/uids/<uid>/cpu_share file for this user
 * We do not create this file for users in a user namespace (until
 * sysfs tagging is implemented).
 *
 * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
 */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	if (up->user_ns != &init_user_ns)
		return 0;
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}

/* create these entries in sysfs:
 * "/sys/kernel/uids" directory
 * "/sys/kernel/uids/0" directory (for root user)
 * "/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	unsigned long flags;
	int remove_user = 0;

	if (up->user_ns != &init_user_ns)
		return;
	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	kobject_uevent(&up->kobj, KOBJ_REMOVE);
	kobject_del(&up->kobj);
	kobject_put(&up->kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	/* put the reference back; the final put and the actual teardown happen
	 * in process context via remove_user_sysfs_dir(), since the kobject
	 * removal there can sleep */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	put_user_ns(up->user_ns);
	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}

#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	put_user_ns(up->user_ns);
	kmem_cache_free(uid_cachep, up);
}

#endif

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current_user_ns();

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
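
/*
 * Illustrative pairing (sketch, not from this file): a caller that looks up
 * a user must drop the reference find_user() took, e.g.
 *
 *	struct user_struct *user = find_user(uid);
 *	if (user) {
 *		... inspect user->processes, user->sigpending, etc. ...
 *		free_uid(user);
 *	}
 */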

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		if (sched_create_user(new) < 0)
			goto out_free_user;

		new->user_ns = get_user_ns(ns);

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
	put_user_ns(new->user_ns);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}
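
/*
 * Illustrative caller pattern (sketch, not from this file): code that
 * switches a task's uid typically allocates the new per-user structure up
 * front and drops the old reference only once the switch cannot fail, e.g.
 *
 *	struct user_struct *new_user;
 *
 *	new_user = alloc_uid(current_user_ns(), uid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	...
 *	free_uid(old_user);
 */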

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for(n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);