/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

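/*
 * The hash folds the high-order UID bits back onto the low-order ones,
 * so runs of consecutive UIDs still spread across the table.
 */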
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

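/* Statically allocated user_struct for UID 0; hashed at boot by uid_cache_init(). */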
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.mq_bytes	= 0,
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
	hlist_del(&up->uidhash_node);
}

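/* Look up a hashed user_struct; on success the caller gains a reference. */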
static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
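
/*
 * Usage sketch (illustrative only, not part of this file): callers pair
 * find_user() with free_uid() to balance the reference they were given:
 *
 *	struct user_struct *u = find_user(uid);
 *	if (u) {
 *		long nproc = atomic_read(&u->processes);
 *		free_uid(u);
 *	}
 */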
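/*
 * Drop a reference on @up; when the count reaches zero, unhash the
 * user_struct and free it.  Safe to call with a NULL pointer.
 *
 * atomic_dec_and_lock() only takes uidhash_lock for the final
 * reference, so the common path stays lock-free.
 */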
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		spin_unlock_irqrestore(&uidhash_lock, flags);
		key_put(up->uid_keyring);
		key_put(up->session_keyring);
		kmem_cache_free(uid_cachep, up);
	} else {
		local_irq_restore(flags);
	}
}
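/*
 * Find or create the user_struct for @uid in @ns, taking a reference.
 * Returns NULL only on allocation failure.
 */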
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif

		new->mq_bytes = 0;
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}
	return up;
}
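/*
 * Hand the current task's per-user accounting over to @new_user.  The
 * caller's reference on @new_user is consumed by current->user; the
 * reference on the old user is dropped here.
 */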
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit? We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it. -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}
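/*
 * Boot-time setup: create the user_struct slab cache, initialise the
 * hash table, and hash the statically allocated root user.
 */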
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);