/*
 * kernel/user.c
 *
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
#include "cred-internals.h"

struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(2),
	},
	.creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
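
/*
 * For example, assuming UIDHASH_BITS is 7 (the usual !CONFIG_BASE_SMALL
 * value from <linux/sched.h>, making UIDHASH_SZ 128):
 *
 *	__uidhashfn(1000) == ((1000 >> 7) + 1000) & 127 == 111
 *	__uidhashfn(1128) == ((1128 >> 7) + 1128) & 127 == 112
 *
 * Without the high-bit fold, both UIDs would land in bucket 104.
 */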

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);
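
/*
 * In consequence, every caller below uses the IRQ-disabling variants
 * (spin_lock_irq()/spin_lock_irqsave()), never spin_lock_bh():
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	... uid_hash_find()/uid_hash_insert()/uid_hash_remove() ...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 */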

/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(2),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.user_ns	= &init_user_ns,
#ifdef CONFIG_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
	put_user_ns(up->user_ns);
}

#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	set_tg_uid(up);

	return rc;
}

#else	/* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }

#endif	/* CONFIG_USER_SCHED */

#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			/* possibly resurrect an "almost deleted" object */
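			/*
			 * A return value of 1 means the count was zero:
			 * cleanup_user_struct() is already scheduled, so
			 * cancel it and reuse the object instead.
			 */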
			if (atomic_inc_return(&user->__count) == 1)
				cancel_delayed_work(&user->work);
			return user;
		}
	}

	return NULL;
}

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	/* reject unparseable input instead of using an uninitialized value */
	if (sscanf(buf, "%lu", &shares) != 1)
		return -EINVAL;

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif
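
/*
 * With CONFIG_USER_SCHED and CONFIG_SYSFS both enabled, the attribute
 * above shows up as /sys/kernel/uids/<uid>/cpu_share. A hypothetical
 * session (the default group share is typically 1024):
 *
 *	# cat /sys/kernel/uids/1000/cpu_share
 *	1024
 *	# echo 2048 > /sys/kernel/uids/1000/cpu_share
 *
 * The write doubles the CPU bandwidth of uid 1000's task group relative
 * to the default.
 */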

#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	long rt_runtime;
	int rc;

	/* reject unparseable input instead of using an uninitialized value */
	if (sscanf(buf, "%ld", &rt_runtime) != 1)
		return -EINVAL;

	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_period;
	int rc;

	/* reject unparseable input instead of using an uninitialized value */
	if (sscanf(buf, "%lu", &rt_period) != 1)
		return -EINVAL;

	rc = sched_group_set_rt_period(up->tg, rt_period);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	&cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	&cpu_rt_runtime_attr.attr,
	&cpu_rt_period_attr.attr,
#endif
	NULL
};

/* the lifetime of user_struct is not managed by the core (for now) */
static void uids_release(struct kobject *kobj)
{
}

static struct kobj_type uids_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_attrs	= uids_attributes,
	.release	= uids_release,
};

/*
 * Create a /sys/kernel/uids/<uid>/cpu_share file for this user.
 * We do not create this file for users in a user namespace (until
 * sysfs tagging is implemented).
 *
 * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
 */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	if (up->user_ns != &init_user_ns)
		return 0;
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}

/* create these entries in sysfs:
 *	"/sys/kernel/uids" directory
 *	"/sys/kernel/uids/0" directory (for root user)
 *	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}

/* delayed work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void cleanup_user_struct(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work.work);
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irqsave(&uidhash_lock, flags);
	if (atomic_read(&up->__count) == 0) {
		uid_hash_remove(up);
		remove_user = 1;
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	if (!remove_user)
		goto done;

	if (up->user_ns == &init_user_ns) {
		kobject_uevent(&up->kobj, KOBJ_REMOVE);
		kobject_del(&up->kobj);
		kobject_put(&up->kobj);
	}

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
	schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
	spin_unlock_irqrestore(&uidhash_lock, flags);
}
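
/*
 * The deferral matters because kobject_del() and sysfs removal can
 * sleep, which is not allowed here: free_user() runs with IRQs disabled
 * and uidhash_lock held. Deferring the cleanup also opens the window in
 * which uid_hash_find() above can resurrect the entry before it is
 * actually torn down.
 */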

#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif

#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
/*
 * We need to check if a setuid can take place. This function should be called
 * before successfully completing the setuid.
 */
int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
{
	return sched_rt_can_attach(up->tg, tsk);
}
#else
int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
{
	return 1;
}
#endif

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current_user_ns();

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
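
/*
 * A minimal caller sketch (hypothetical):
 *
 *	struct user_struct *up = find_user(uid);
 *
 *	if (up) {
 *		pr_info("uid %u runs %d processes\n",
 *			uid, atomic_read(&up->processes));
 *		free_uid(up);
 *	}
 *
 * The free_uid() call drops the reference that find_user() took.
 */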

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		if (sched_create_user(new) < 0)
			goto out_free_user;

		new->user_ns = get_user_ns(ns);

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
	put_user_ns(new->user_ns);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}
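
/*
 * alloc_uid() is the counterpart of free_uid(): a caller such as
 * set_user() in kernel/sys.c looks up (or creates) the user_struct for
 * the new UID, stores the returned reference in the task's credentials,
 * and the old reference is dropped with free_uid() when those
 * credentials are released.
 */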

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
				       0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);