/*
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
#include "cred-internals.h"

struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(1),
	},
	.creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))

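/*
 * Worked example (illustrative only, assuming the usual non-CONFIG_BASE_SMALL
 * values from sched.h: UIDHASH_BITS == 7, UIDHASH_SZ == 128, UIDHASH_MASK == 127):
 *
 *	__uidhashfn(1000) == ((1000 >> 7) + 1000) & 127
 *	                  == (7 + 1000) & 127
 *	                  == 111
 *
 * Folding the high bits back in before masking keeps clustered uids from
 * all landing in the same hash bucket.
 */
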
static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

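/*
 * The rule above in code form (a sketch of the pattern callers in this file
 * follow, not a new API): because the lock can be taken from softirq context
 * and free_uid() may already run with local interrupts disabled, lookups use
 * the irqsave variant rather than spin_lock_bh():
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	... look up or modify the uidhash table ...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 */
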
/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(2),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.user_ns	= &init_user_ns,
#ifdef CONFIG_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

#else	/* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }

#endif	/* CONFIG_USER_SCHED */

#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	sscanf(buf, "%lu", &shares);

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif

#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_runtime;
	int rc;

	sscanf(buf, "%ld", &rt_runtime);

	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_period;
	int rc;

	sscanf(buf, "%lu", &rt_period);

	rc = sched_group_set_rt_period(up->tg, rt_period);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	&cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	&cpu_rt_runtime_attr.attr,
	&cpu_rt_period_attr.attr,
#endif
	NULL
};

/* the lifetime of user_struct is not managed by the core (now) */
static void uids_release(struct kobject *kobj)
{
	return;
}

static struct kobj_type uids_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_attrs	= uids_attributes,
	.release	= uids_release,
};

/*
 * Create /sys/kernel/uids/<uid>/cpu_share file for this user.
 * We do not create this file for users in a user namespace (until
 * sysfs tagging is implemented).
 *
 * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
 */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	if (up->user_ns != &init_user_ns)
		return 0;
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error)
		goto done;

	kobject_uevent(kobj, KOBJ_ADD);

done:
	return error;
}

/* create these entries in sysfs:
 *	"/sys/kernel/uids" directory
 *	"/sys/kernel/uids/0" directory (for root user)
 *	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	unsigned long flags;
	int remove_user = 0;

	if (up->user_ns != &init_user_ns)
		return;
	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	kobject_uevent(&up->kobj, KOBJ_REMOVE);
	kobject_del(&up->kobj);
	kobject_put(&up->kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore back the count */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	put_user_ns(up->user_ns);
	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}

#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	put_user_ns(up->user_ns);
	kmem_cache_free(uid_cachep, up);
}

#endif	/* CONFIG_USER_SCHED && CONFIG_SYSFS */

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current_user_ns();

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

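/*
 * Minimal usage sketch (hypothetical caller, not code from this file):
 * a non-NULL result carries a reference that must be dropped with free_uid():
 *
 *	struct user_struct *user = find_user(uid);
 *
 *	if (user) {
 *		... inspect user->processes, user->sigpending, etc. ...
 *		free_uid(user);		(drop the ref find_user() took)
 *	}
 */
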
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		if (sched_create_user(new) < 0)
			goto out_free_user;

		new->user_ns = get_user_ns(ns);

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
	put_user_ns(new->user_ns);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}

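/*
 * Minimal usage sketch (hypothetical snippet; real callers sit in the cred
 * and setuid paths): alloc_uid() returns an existing user_struct with an
 * extra reference, or a freshly hashed one, and NULL only on failure:
 *
 *	struct user_struct *new_user = alloc_uid(current_user_ns(), uid);
 *
 *	if (!new_user)
 *		return -EAGAIN;
 *	...
 *	free_uid(new_user);
 */
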
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);