#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

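/*
 * One audit_tree per watched directory pathname; refcounted (see the
 * big comment below).  The pathname is stored inline as a flexible
 * array member, so the tree and its path come from one allocation.
 */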
struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

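/*
 * One audit_chunk per tagged inode.  The fsnotify mark is embedded, so
 * mark lifetime drives chunk lifetime; owners[] is a flexible array
 * with one node per tree that has tagged this inode.
 */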
struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index lets us get from node.list to the containing chunk.
 * Its MSB is stolen to mark taggings that we might have to revert -
 * several operations have very unpleasant cleanup logic and that
 * makes a difference. Some.
 */

static struct fsnotify_group *audit_tree_group;

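/*
 * Allocate a tree with its pathname appended in the same allocation;
 * the caller gets the initial reference (count == 1).
 */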
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct fsnotify_mark *entry = &chunk->mark;
	struct list_head *list;

	if (!entry->inode)
		return;
	list = chunk_hash(entry->inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/* mark.inode may have gone NULL, but who cares? */
		if (p->mark.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

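/*
 * Recover the containing chunk from a node: the low 31 bits of
 * node->index are the node's position in chunk->owners[], so stepping
 * back that many entries lands on owners[0] and container_of() does
 * the rest.  E.g. for the node at owners[2], index & ~(1U<<31) == 2,
 * so p - 2 == &chunk->owners[0].
 */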
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

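/*
 * Remove the tagging of p's owner from its chunk.  Called with
 * hash_lock held; drops it around allocation and locking, and takes it
 * again before returning.  A live chunk with other owners is replaced
 * by a fresh copy one node smaller; if allocation fails we fall back
 * to clearing the owner slot in place.
 */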
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	mutex_lock(&entry->group->mark_mutex);
	spin_lock(&entry->lock);
	if (chunk->dead || !entry->inode) {
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		if (new)
			free_chunk(new);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	if (fsnotify_add_mark_locked(&new->mark, entry->group, entry->inode,
				     NULL, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	/* do the best we can */
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

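/*
 * Tag an inode that has no chunk yet: allocate a single-owner chunk,
 * attach its mark to the inode and hook it into the tree and the hash.
 */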
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

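/*
 * Tag an inode on behalf of a tree.  The first tagged inode becomes
 * the root of the tree.  If the inode already carries a chunk, the
 * chunk is replaced by a copy with one more owner slot; chunks are
 * replaced rather than resized so that RCU hash lookups stay safe.
 */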
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	mutex_lock(&old_entry->group->mark_mutex);
	spin_lock(&old_entry->lock);
	if (!old_entry->inode) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(old_entry);
		free_chunk(chunk);
		return -ENOENT;
	}

	if (fsnotify_add_mark_locked(chunk_entry, old_entry->group,
				     old_entry->inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/*
	 * Even though we hold old_entry->lock, this is safe since
	 * chunk_entry->lock could never have been grabbed before.
	 */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	mutex_unlock(&old_entry->group->mark_mutex);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry);	/* pair to fsnotify_find_inode_mark */
	return 0;
}

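/* emit an AUDIT_CONFIG_CHANGE record for a rule removed with its tree */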
static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

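/*
 * Detach and free every rule hanging off the tree.  Caller holds
 * audit_filter_mutex; the actual freeing is deferred through RCU.
 */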
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

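/*
 * Under hash_lock: first move every node with the "will prune" bit set
 * to the front of tree->chunks, then untag from the front until an
 * unmarked node is reached.  If that leaves the tree without a root,
 * the tree itself is killed.
 */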
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

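/* iterate_mounts() callback: does this mount's root match the inode? */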
static int compare_root(struct vfsmount *mnt, void *arg)
{
	return d_backing_inode(mnt->mnt_root) == arg;
}

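/*
 * Re-check every tree against the mounts currently under its path and
 * untag chunks that no longer match.  A dummy cursor entry keeps our
 * place in tree_list so audit_filter_mutex can be dropped per tree.
 */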
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			struct inode *inode = chunk->mark.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill an
 * audit_tree.  Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		mutex_lock(&audit_cmd_mutex);
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		mutex_unlock(&audit_cmd_mutex);
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree\n");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

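/*
 * Tag every mount collected at "new" into each tree whose path
 * contains "old".  The cursor tracks our position in tree_list across
 * drops of audit_filter_mutex; trees handled successfully are requeued
 * between the list head and the barrier, so the second loop revisits
 * exactly the trees this call tagged, clearing their prune bits on
 * success or trimming them on failure.
 */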
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the
 * end of the syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

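/*
 * The inode behind a chunk is going away: kill the chunk and every
 * tree rooted here.  When called from inside a syscall the victims are
 * queued on the task's killed-trees list and reaped by
 * audit_kill_trees() at syscall exit; otherwise they go to prune_list
 * for the prune thread.
 */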
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

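/*
 * Events are ignored here; the mark exists only so that freeing_mark
 * and the destroy callback fire when the watched inode goes away.
 */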
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   u32 mask, const void *data, int data_type,
				   const unsigned char *file_name, u32 cookie)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(atomic_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);