TOMOYO: Cleanup part 3.
[mirror_ubuntu-focal-kernel.git] / security / tomoyo / gc.c
/*
 * security/tomoyo/gc.c
 *
 * Implementation of the Domain-Based Mandatory Access Control.
 *
 * Copyright (C) 2005-2010 NTT DATA CORPORATION
 *
 */

#include "common.h"
#include <linux/kthread.h>
#include <linux/slab.h>

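/**
 * struct tomoyo_gc - Entry on the "to be deleted" list.
 *
 * @list: Entry in tomoyo_gc_queue.
 * @type: One of values in "enum tomoyo_policy_id".
 * @element: Pointer to the list entry of the element to be freed.
 */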
struct tomoyo_gc {
	struct list_head list;
	enum tomoyo_policy_id type;
	struct list_head *element;
};
static LIST_HEAD(tomoyo_gc_queue);
static DEFINE_MUTEX(tomoyo_gc_mutex);

/**
 * tomoyo_add_to_gc - Add an entry to the "to be deleted" list.
 *
 * @type: One of values in "enum tomoyo_policy_id".
 * @element: Pointer to "struct list_head".
 *
 * Returns true on success, false otherwise.
 *
 * Caller holds tomoyo_policy_lock mutex.
 *
 * Adding an entry needs kmalloc(). Thus, if we try to add thousands of
 * entries at once, it will take too long. Therefore, do not add more than 128
 * entries per scan. But to be able to handle the worst case where all entries
 * are in use, we accept one more entry per scan.
 *
 * If we used a singly linked list via "struct list_head"->prev (which is
 * LIST_POISON2), we could avoid kmalloc().
 */
static bool tomoyo_add_to_gc(const int type, struct list_head *element)
{
	struct tomoyo_gc *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return false;
	entry->type = type;
	entry->element = element;
	list_add(&entry->list, &tomoyo_gc_queue);
	list_del_rcu(element);
	return true;
}

/**
 * tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_transition_control(struct list_head *element)
{
	struct tomoyo_transition_control *ptr =
		container_of(element, typeof(*ptr), head.list);
	tomoyo_put_name(ptr->domainname);
	tomoyo_put_name(ptr->program);
}

/**
 * tomoyo_del_aggregator - Delete members in "struct tomoyo_aggregator".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_aggregator(struct list_head *element)
{
	struct tomoyo_aggregator *ptr =
		container_of(element, typeof(*ptr), head.list);
	tomoyo_put_name(ptr->original_name);
	tomoyo_put_name(ptr->aggregated_name);
}

/**
 * tomoyo_del_manager - Delete members in "struct tomoyo_manager".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_manager(struct list_head *element)
{
	struct tomoyo_manager *ptr =
		container_of(element, typeof(*ptr), head.list);
	tomoyo_put_name(ptr->manager);
}

/**
 * tomoyo_del_acl - Delete members in "struct tomoyo_acl_info".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_acl(struct list_head *element)
{
	struct tomoyo_acl_info *acl =
		container_of(element, typeof(*acl), list);
	switch (acl->type) {
	case TOMOYO_TYPE_PATH_ACL:
		{
			struct tomoyo_path_acl *entry
				= container_of(acl, typeof(*entry), head);
			tomoyo_put_name_union(&entry->name);
		}
		break;
	case TOMOYO_TYPE_PATH2_ACL:
		{
			struct tomoyo_path2_acl *entry
				= container_of(acl, typeof(*entry), head);
			tomoyo_put_name_union(&entry->name1);
			tomoyo_put_name_union(&entry->name2);
		}
		break;
	case TOMOYO_TYPE_PATH_NUMBER_ACL:
		{
			struct tomoyo_path_number_acl *entry
				= container_of(acl, typeof(*entry), head);
			tomoyo_put_name_union(&entry->name);
			tomoyo_put_number_union(&entry->number);
		}
		break;
	case TOMOYO_TYPE_MKDEV_ACL:
		{
			struct tomoyo_mkdev_acl *entry
				= container_of(acl, typeof(*entry), head);
			tomoyo_put_name_union(&entry->name);
			tomoyo_put_number_union(&entry->mode);
			tomoyo_put_number_union(&entry->major);
			tomoyo_put_number_union(&entry->minor);
		}
		break;
	case TOMOYO_TYPE_MOUNT_ACL:
		{
			struct tomoyo_mount_acl *entry
				= container_of(acl, typeof(*entry), head);
			tomoyo_put_name_union(&entry->dev_name);
			tomoyo_put_name_union(&entry->dir_name);
			tomoyo_put_name_union(&entry->fs_type);
			tomoyo_put_number_union(&entry->flags);
		}
		break;
	}
}

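/**
 * tomoyo_del_domain - Delete members in "struct tomoyo_domain_info".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns true if the domain's members were released and the domain can be
 * freed, false if the domain is in use again and must be retried after the
 * next SRCU grace period.
 */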
static bool tomoyo_del_domain(struct list_head *element)
{
	struct tomoyo_domain_info *domain =
		container_of(element, typeof(*domain), list);
	struct tomoyo_acl_info *acl;
	struct tomoyo_acl_info *tmp;
	/*
	 * Since we do not protect the whole execve() operation with SRCU,
	 * we need to recheck domain->users at this point.
	 *
	 * (1) Reader starts SRCU section upon execve().
	 * (2) Reader traverses tomoyo_domain_list and finds this domain.
	 * (3) Writer marks this domain as deleted.
	 * (4) Garbage collector removes this domain from tomoyo_domain_list
	 *     because this domain is marked as deleted and used by nobody.
	 * (5) Reader saves a reference to this domain into
	 *     "struct linux_binprm"->cred->security.
	 * (6) Reader finishes SRCU section, although execve() operation has
	 *     not finished yet.
	 * (7) Garbage collector waits for SRCU synchronization.
	 * (8) Garbage collector kfree()s this domain because this domain is
	 *     used by nobody.
	 * (9) Reader finishes execve() operation and restores this domain from
	 *     "struct linux_binprm"->cred->security.
	 *
	 * By updating domain->users at (5), we can solve this race problem
	 * by rechecking domain->users at (8).
	 */
	if (atomic_read(&domain->users))
		return false;
	list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) {
		tomoyo_del_acl(&acl->list);
		tomoyo_memory_free(acl);
	}
	tomoyo_put_name(domain->domainname);
	return true;
}

/**
 * tomoyo_del_name - Delete members in "struct tomoyo_name".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_name(struct list_head *element)
{
	const struct tomoyo_name *ptr =
		container_of(element, typeof(*ptr), head.list);
}

/**
 * tomoyo_del_path_group - Delete members in "struct tomoyo_path_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_path_group(struct list_head *element)
{
	struct tomoyo_path_group *member =
		container_of(element, typeof(*member), head.list);
	tomoyo_put_name(member->member_name);
}

/**
 * tomoyo_del_group - Delete "struct tomoyo_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_group(struct list_head *element)
{
	struct tomoyo_group *group =
		container_of(element, typeof(*group), head.list);
	tomoyo_put_name(group->group_name);
}

/**
 * tomoyo_del_number_group - Delete members in "struct tomoyo_number_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_number_group(struct list_head *element)
{
	struct tomoyo_number_group *member =
		container_of(element, typeof(*member), head.list);
}

/**
 * tomoyo_collect_member - Queue deleted elements that use "struct tomoyo_acl_head".
 *
 * @id: One of values in "enum tomoyo_policy_id".
 * @member_list: Pointer to "struct list_head".
 *
 * Returns true on success, false if tomoyo_add_to_gc() failed.
 */
static bool tomoyo_collect_member(const enum tomoyo_policy_id id,
				  struct list_head *member_list)
{
	struct tomoyo_acl_head *member;
	list_for_each_entry(member, member_list, list) {
		if (!member->is_deleted)
			continue;
		if (!tomoyo_add_to_gc(id, &member->list))
			return false;
	}
	return true;
}

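/**
 * tomoyo_collect_acl - Queue deleted ACL entries in a domain.
 *
 * @domain: Pointer to "struct tomoyo_domain_info".
 *
 * Returns true on success, false if tomoyo_add_to_gc() failed.
 */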
static bool tomoyo_collect_acl(struct tomoyo_domain_info *domain)
{
	struct tomoyo_acl_info *acl;
	list_for_each_entry(acl, &domain->acl_info_list, list) {
		if (!acl->is_deleted)
			continue;
		if (!tomoyo_add_to_gc(TOMOYO_ID_ACL, &acl->list))
			return false;
	}
	return true;
}

/**
 * tomoyo_collect_entry - Scan lists for deleted elements.
 *
 * Returns nothing.
 */
static void tomoyo_collect_entry(void)
{
	int i;
	if (mutex_lock_interruptible(&tomoyo_policy_lock))
		return;
	for (i = 0; i < TOMOYO_MAX_POLICY; i++) {
		if (!tomoyo_collect_member(i, &tomoyo_policy_list[i]))
			goto unlock;
	}
	{
		struct tomoyo_domain_info *domain;
		list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
			if (!tomoyo_collect_acl(domain))
				goto unlock;
			if (!domain->is_deleted || atomic_read(&domain->users))
				continue;
			/*
			 * Nobody is referring to this domain, but somebody may
			 * start referring to it after a successful execve().
			 * We recheck domain->users after SRCU synchronization.
			 */
			if (!tomoyo_add_to_gc(TOMOYO_ID_DOMAIN, &domain->list))
				goto unlock;
		}
	}
	for (i = 0; i < TOMOYO_MAX_HASH; i++) {
		struct tomoyo_name *ptr;
		list_for_each_entry_rcu(ptr, &tomoyo_name_list[i], head.list) {
			if (atomic_read(&ptr->head.users))
				continue;
			if (!tomoyo_add_to_gc(TOMOYO_ID_NAME, &ptr->head.list))
				goto unlock;
		}
	}
	for (i = 0; i < TOMOYO_MAX_GROUP; i++) {
		struct list_head *list = &tomoyo_group_list[i];
		int id;
		struct tomoyo_group *group;
		switch (i) {
		case 0:
			id = TOMOYO_ID_PATH_GROUP;
			break;
		default:
			id = TOMOYO_ID_NUMBER_GROUP;
			break;
		}
		list_for_each_entry(group, list, head.list) {
			if (!tomoyo_collect_member(id, &group->member_list))
				goto unlock;
			if (!list_empty(&group->member_list) ||
			    atomic_read(&group->head.users))
				continue;
			if (!tomoyo_add_to_gc(TOMOYO_ID_GROUP,
					      &group->head.list))
				goto unlock;
		}
	}
 unlock:
	mutex_unlock(&tomoyo_policy_lock);
}

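/**
 * tomoyo_kfree_entry - Release elements queued on the "to be deleted" list.
 *
 * Returns nothing.
 *
 * Domains whose domain->users became non-zero again are left on the queue
 * and retried on the next pass.
 */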
static void tomoyo_kfree_entry(void)
{
	struct tomoyo_gc *p;
	struct tomoyo_gc *tmp;

	list_for_each_entry_safe(p, tmp, &tomoyo_gc_queue, list) {
		struct list_head *element = p->element;
		switch (p->type) {
		case TOMOYO_ID_TRANSITION_CONTROL:
			tomoyo_del_transition_control(element);
			break;
		case TOMOYO_ID_AGGREGATOR:
			tomoyo_del_aggregator(element);
			break;
		case TOMOYO_ID_MANAGER:
			tomoyo_del_manager(element);
			break;
		case TOMOYO_ID_NAME:
			tomoyo_del_name(element);
			break;
		case TOMOYO_ID_ACL:
			tomoyo_del_acl(element);
			break;
		case TOMOYO_ID_DOMAIN:
			if (!tomoyo_del_domain(element))
				continue;
			break;
		case TOMOYO_ID_PATH_GROUP:
			tomoyo_del_path_group(element);
			break;
		case TOMOYO_ID_GROUP:
			tomoyo_del_group(element);
			break;
		case TOMOYO_ID_NUMBER_GROUP:
			tomoyo_del_number_group(element);
			break;
		case TOMOYO_MAX_POLICY:
			break;
		}
		tomoyo_memory_free(element);
		list_del(&p->list);
		kfree(p);
	}
}

/**
 * tomoyo_gc_thread - Garbage collector thread function.
 *
 * @unused: Unused.
 *
 * In case the OOM killer chooses this thread for termination, we create this
 * thread as a short-lived thread whenever the /sys/kernel/security/tomoyo/
 * interface is close()d.
 *
 * Returns 0.
 */
static int tomoyo_gc_thread(void *unused)
{
	daemonize("GC for TOMOYO");
	if (mutex_trylock(&tomoyo_gc_mutex)) {
		int i;
		for (i = 0; i < 10; i++) {
			tomoyo_collect_entry();
			if (list_empty(&tomoyo_gc_queue))
				break;
			synchronize_srcu(&tomoyo_ss);
			tomoyo_kfree_entry();
		}
		mutex_unlock(&tomoyo_gc_mutex);
	}
	do_exit(0);
}

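/**
 * tomoyo_run_gc - Start the garbage collector thread.
 *
 * Returns nothing.
 */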
void tomoyo_run_gc(void)
{
	struct task_struct *task = kthread_create(tomoyo_gc_thread, NULL,
						  "GC for TOMOYO");
	if (!IS_ERR(task))
		wake_up_process(task);
}