/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))

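/* Slab cache from which user_struct objects are allocated in alloc_uid(). */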
static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.mq_bytes	= 0,
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
#ifdef CONFIG_FAIR_USER_SCHED
	.tg		= &init_task_grp,
#endif
};

#ifdef CONFIG_FAIR_USER_SCHED
static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group();
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

static void sched_switch_user(struct task_struct *p)
{
	sched_move_task(p);
}

#else	/* CONFIG_FAIR_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }

#endif	/* CONFIG_FAIR_USER_SCHED */

/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

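/*
 * Drop a reference to @up; when the last reference goes away, the
 * user_struct is unhashed and freed.  Safe to call with a NULL pointer.
 * A caller that obtained its reference from find_user() simply pairs the
 * two calls, e.g. (illustrative sketch, not from the original source):
 *
 *	struct user_struct *u = find_user(uid);
 *	if (u) {
 *		... use u ...
 *		free_uid(u);
 *	}
 */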
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		spin_unlock_irqrestore(&uidhash_lock, flags);
		sched_destroy_user(up);
		key_put(up->uid_keyring);
		key_put(up->session_keyring);
		kmem_cache_free(uid_cachep, up);
	} else {
		local_irq_restore(flags);
	}
}

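/*
 * Find the user_struct for @uid in @ns, creating it if it does not exist
 * yet.  Returns a referenced user_struct, or NULL if the allocation or the
 * keyring/scheduler setup fails.  The lookup is redone under the lock
 * before the new entry is hashed, in case two tasks raced to instantiate
 * the same uid.
 */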
struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif

		new->mq_bytes = 0;
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}

		if (sched_create_user(new) < 0) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			sched_destroy_user(new);
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);

	}
	return up;
}

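/*
 * Re-account the calling task against @new_user: move the process count
 * from the old user, switch keyrings and the scheduler's user grouping,
 * then drop the reference current held on the old user_struct.  The
 * caller's reference on @new_user (typically obtained via alloc_uid())
 * is the one that current->user keeps holding afterwards.
 */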
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	sched_switch_user(current);

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}

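/*
 * Unhash every user_struct in @ns (presumably on user-namespace teardown;
 * the caller lives outside this file).  The entries themselves stay alive
 * until their last free_uid().
 */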
void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * collapse the chains so that the user_struct-s will
	 * be still alive, but not in hashes. subsequent free_uid()
	 * will free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}

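/*
 * Boot-time setup: create the user_struct slab cache, initialise the hash
 * buckets of the init user namespace and hash root_user for uid 0.
 */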
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);