/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
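
/*
 * Each of the three ID maps below holds a single identity extent
 * covering ids 0 through 4294967294 (count 4294967295U), i.e. every
 * valid uid, gid and projid maps to itself in the initial namespace.
 */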
struct user_namespace init_user_ns = {
        .uid_map = {
                .nr_extents = 1,
                {
                        .extent[0] = {
                                .first = 0,
                                .lower_first = 0,
                                .count = 4294967295U,
                        },
                },
        },
        .gid_map = {
                .nr_extents = 1,
                {
                        .extent[0] = {
                                .first = 0,
                                .lower_first = 0,
                                .count = 4294967295U,
                        },
                },
        },
        .projid_map = {
                .nr_extents = 1,
                {
                        .extent[0] = {
                                .first = 0,
                                .lower_first = 0,
                                .count = 4294967295U,
                        },
                },
        },
        .count = ATOMIC_INIT(3),
        .owner = GLOBAL_ROOT_UID,
        .group = GLOBAL_ROOT_GID,
        .ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
        .ns.ops = &userns_operations,
#endif
        .flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_PERSISTENT_KEYRINGS
        .persistent_keyring_register_sem =
                __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ (1 << UIDHASH_BITS)
#define UIDHASH_MASK (UIDHASH_SZ - 1)
#define __uidhashfn(uid) ((((uid) >> UIDHASH_BITS) + (uid)) & UIDHASH_MASK)
#define uidhashentry(uid) (uidhash_table + __uidhashfn((__kuid_val(uid))))
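
/*
 * Example (illustrative): with the default UIDHASH_BITS of 7, uid 1000
 * hashes to ((1000 >> 7) + 1000) & 127 == 1007 & 127 == 111.  Folding
 * the high bits into the low bits this way keeps UIDs that differ only
 * above bit 6 (e.g. 0 and 128) from landing in the same bucket.
 */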

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
        .__count = ATOMIC_INIT(1),
        .processes = ATOMIC_INIT(1),
        .sigpending = ATOMIC_INIT(0),
        .locked_shm = 0,
        .uid = GLOBAL_ROOT_UID,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
        hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
        hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
        struct user_struct *user;

        hlist_for_each_entry(user, hashent, uidhash_node) {
                if (uid_eq(user->uid, uid)) {
                        atomic_inc(&user->__count);
                        return user;
                }
        }

        return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
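 * (The __releases() annotation is a sparse hint documenting that the
 * lock is dropped on the way out.)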
 */
static void free_user(struct user_struct *up, unsigned long flags)
        __releases(&uidhash_lock)
{
        uid_hash_remove(up);
        spin_unlock_irqrestore(&uidhash_lock, flags);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
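 *
 * Illustrative usage (not taken from a real caller):
 *
 *      struct user_struct *u = find_user(uid);
 *      if (u) {
 *              ... inspect the per-user counters ...
 *              free_uid(u);
 *      }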
 */
struct user_struct *find_user(kuid_t uid)
{
        struct user_struct *ret;
        unsigned long flags;

        spin_lock_irqsave(&uidhash_lock, flags);
        ret = uid_hash_find(uid, uidhashentry(uid));
        spin_unlock_irqrestore(&uidhash_lock, flags);
        return ret;
}

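/*
 * Drop a reference on @up.  atomic_dec_and_lock() takes uidhash_lock only
 * on the slow path, when the count actually reaches zero; interrupts are
 * disabled by hand because free_user() drops the lock with
 * spin_unlock_irqrestore() (see the comment above uidhash_lock for why a
 * bh-based variant cannot be used here).
 */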
void free_uid(struct user_struct *up)
{
        unsigned long flags;

        if (!up)
                return;

        local_irq_save(flags);
        if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
                free_user(up, flags);
        else
                local_irq_restore(flags);
}

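/*
 * Find the user_struct for @uid, creating it if it does not exist yet.
 * The allocation is done with uidhash_lock dropped (GFP_KERNEL may
 * sleep), so the hash must be searched again under the lock before
 * inserting, in case another task installed the same uid meanwhile.
 */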
struct user_struct *alloc_uid(kuid_t uid)
{
        struct hlist_head *hashent = uidhashentry(uid);
        struct user_struct *up, *new;

        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock_irq(&uidhash_lock);

        if (!up) {
                new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
                if (!new)
                        return NULL;

                new->uid = uid;
                atomic_set(&new->__count, 1);

                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already..
                 */
                spin_lock_irq(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock_irq(&uidhash_lock);
        }

        return up;
}

static int __init uid_cache_init(void)
{
        int n;

        uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        for (n = 0; n < UIDHASH_SZ; ++n)
                INIT_HLIST_HEAD(uidhash_table + n);

        /* Insert the root user immediately (init already runs as root) */
        spin_lock_irq(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
        spin_unlock_irq(&uidhash_lock);

        return 0;
}
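
/*
 * All initcalls complete before PID 1 is executed, so the cache and the
 * root user's hash entry exist before any userspace set*uid() call.
 */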
subsys_initcall(uid_cache_init);