/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(2),
	},
	.root_user = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
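
/*
 * __uidhashfn() folds the bits above UIDHASH_BITS back into the low
 * bits, so both consecutive UIDs and UIDs differing only in their high
 * bits spread across the UIDHASH_SZ hash buckets.
 */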

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
#ifdef CONFIG_FAIR_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}
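
/*
 * Note that uid_hash_find() bumps the reference count while still under
 * uidhash_lock, so a successful lookup can never race with the final
 * atomic_dec_and_lock() in free_uid().
 */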

#ifdef CONFIG_FAIR_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group();
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

static void sched_switch_user(struct task_struct *p)
{
	sched_move_task(p);
}

#else	/* CONFIG_FAIR_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }

#endif	/* CONFIG_FAIR_USER_SCHED */
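
/*
 * The empty stubs above let the rest of this file call the scheduler
 * hooks unconditionally, with no #ifdef at each call site.
 */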

#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	/* reject input that does not parse as a number */
	if (sscanf(buf, "%lu", &shares) != 1)
		return -EINVAL;

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
	&cpu_share_attr.attr,
	NULL
};

/* the lifetime of user_struct is not managed by the core (now) */
static void uids_release(struct kobject *kobj)
{
}

static struct kobj_type uids_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = uids_attributes,
	.release = uids_release,
};

/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}
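
/*
 * If kobject_init_and_add() fails it still leaves a reference held on
 * the kobject, so the error path above must drop it with kobject_put()
 * before bailing out.
 */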

/* create these entries in sysfs:
 *	"/sys/kernel/uids" directory
 *	"/sys/kernel/uids/0" directory (for root user)
 *	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	kobject_uevent(&up->kobj, KOBJ_REMOVE);
	kobject_del(&up->kobj);
	kobject_put(&up->kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore the count; the work function will drop it again */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}
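
/*
 * The deferral above matters: free_uid() may run with interrupts
 * disabled, but kobject/sysfs teardown can sleep, so the actual cleanup
 * is pushed out to process context via schedule_work().
 */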

#else	/* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif	/* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
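
/*
 * A typical caller pairs the two calls, e.g. (a sketch, not code from
 * this file):
 *
 *	struct user_struct *user = find_user(uid);
 *	if (user) {
 *		... inspect user->processes, user->files, etc ...
 *		free_uid(user);
 *	}
 */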

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
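
/*
 * atomic_dec_and_lock() takes uidhash_lock only when the count actually
 * drops to zero, so the common "still referenced" path stays lock-free.
 */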

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif
#ifdef CONFIG_POSIX_MQUEUE
		new->mq_bytes = 0;
#endif
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0)
			goto out_free_user;

		if (sched_create_user(new) < 0)
			goto out_put_keys;

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_FAIR_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
out_put_keys:
	key_put(new->uid_keyring);
	key_put(new->session_keyring);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}
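
/*
 * alloc_uid() uses the classic optimistic pattern: allocate and set up
 * the candidate outside uidhash_lock (the allocation can sleep), then
 * re-check the hash under the lock and free the new copy if another
 * task won the race.
 */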

void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	sched_switch_user(current);

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}

#ifdef CONFIG_USER_NS
void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * Collapse the chains so that the user_struct-s will
	 * still be alive, but no longer hashed; a subsequent
	 * free_uid() will free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}
#endif

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);