kernel/user.c
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 8)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((uid)))
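
/*
 * Note (added for clarity): the bucket index folds the bits above the
 * low UIDHASH_BITS back into the sum before masking, so UIDs that
 * differ only in their high bits still spread across the table.
 */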

static kmem_cache_t *uid_cachep;
static struct list_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);
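
/*
 * Hence the code below always disables interrupts around the lock
 * instead of using spin_lock_bh(): disabling interrupts blocks softirqs
 * too, and free_uid() saves/restores the flags by hand so it stays
 * correct even when its caller already has interrupts off.
 */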

struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.mq_bytes	= 0,
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
};
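
/* root_user is hashed into the UID-0 bucket by uid_cache_init() below. */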

/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
{
	list_add(&up->uidhash_list, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
	list_del(&up->uidhash_list);
}

static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent)
{
	struct list_head *up;

	list_for_each(up, hashent) {
		struct user_struct *user;

		user = list_entry(up, struct user_struct, uidhash_list);

		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
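
/*
 * A minimal (hypothetical) caller, pairing the lookup with the ref drop:
 *
 *	struct user_struct *u = find_user(uid);
 *	if (u) {
 *		... inspect u->processes, u->files, etc. ...
 *		free_uid(u);
 *	}
 */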

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	local_irq_save(flags);
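	/*
	 * atomic_dec_and_lock() takes uidhash_lock only when the refcount
	 * hits zero, so only the final put pays for the lock.
	 */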
	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		key_put(up->uid_keyring);
		key_put(up->session_keyring);
		kmem_cache_free(uid_cachep, up);
		spin_unlock(&uidhash_lock);
	}
	local_irq_restore(flags);
}

struct user_struct *alloc_uid(uid_t uid)
{
	struct list_head *hashent = uidhashentry(uid);
	struct user_struct *up;

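	/* Fast path first: most of the time the uid is already cached. */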
	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
		if (!new)
			return NULL;
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif

		new->mq_bytes = 0;
		new->locked_shm = 0;

		if (alloc_uid_keyring(new) < 0) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}
	return up;
}
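
/*
 * Rough caller sketch (cf. set_user() in kernel/sys.c):
 *
 *	struct user_struct *new_user = alloc_uid(new_ruid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	switch_uid(new_user);
 */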

void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over its NPROC rlimit? We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it. -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	free_uid(old_user);
	suid_keys(current);
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_LIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);