// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>
struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	unsigned long key;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};
static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;
/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference.  Some.
 */
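
/*
 * Illustrative helpers, added for exposition and not part of the original
 * file: how the stolen MSB of node.index is read.  A set MSB marks a
 * tagging that may still have to be reverted; the remaining bits are the
 * node's slot in the owning chunk's owners[] array.
 */
static inline bool node_is_marked(const struct node *n)
{
	/* MSB set => uncommitted tagging, candidate for pruning/revert */
	return n->index & (1U << 31);
}

static inline unsigned int node_slot(const struct node *n)
{
	/* low bits give the index into the owning chunk's owners[] */
	return n->index & ~(1U << 31);
}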
static struct fsnotify_group *audit_tree_group;
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}
static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}
static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}
/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}
void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}
static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}
static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_group);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	return (unsigned long)inode;
}
static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}
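
/*
 * A worked sketch, added for exposition (this helper is an assumption, not
 * part of the original file): since HASH_SIZE is a power of two, the
 * division and modulus in chunk_hash() select the "middle bits" of the
 * inode address mentioned above.  With 64-byte cache lines that is
 * (key / 64) % 128, i.e. bits 6..12 of the pointer value.
 */
static inline unsigned long chunk_hash_bucket_nr(unsigned long key)
{
	/* the bucket number chunk_hash() indexes chunk_hash_heads with */
	return (key / L1_CACHE_BYTES) % HASH_SIZE;
}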
/* hash_lock & entry->lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list;

	if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED))
		return;
	WARN_ON_ONCE(!chunk->key);
	list = chunk_hash(chunk->key);
	list_add_rcu(&chunk->hash, list);
}
/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (p->key == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}
bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;

	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}
/* tagging and untagging inodes with trees */

static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
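
/*
 * Sanity sketch, added for exposition and not part of the original file:
 * a node reached through chunk->owners[slot] maps back to its chunk no
 * matter whether the 'will prune' MSB happens to be set, because
 * find_chunk() masks that bit before doing the pointer arithmetic.
 */
static inline bool find_chunk_round_trips(struct audit_chunk *chunk,
					  unsigned int slot)
{
	return find_chunk(&chunk->owners[slot]) == chunk;
}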
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	mutex_lock(&entry->group->mark_mutex);
	spin_lock(&entry->lock);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->inode getting NULL.
	 */
	if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		if (new)
			fsnotify_put_mark(&new->mark);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	if (fsnotify_add_mark_locked(&new->mark, entry->connector->inode,
				     NULL, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	new->key = chunk->key;
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, inode, NULL, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	chunk->key = inode_to_key(inode);
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks,
				       audit_tree_group);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	mutex_lock(&old_entry->group->mark_mutex);
	spin_lock(&old_entry->lock);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->inode getting NULL.
	 */
	if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		/* old_entry is being shot, lets just lie */
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(old_entry);
		fsnotify_put_mark(&chunk->mark);
		return -ENOENT;
	}

	if (fsnotify_add_mark_locked(chunk_entry,
			old_entry->connector->inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	chunk->key = old->key;
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	mutex_unlock(&old_entry->group->mark_mutex);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
	return 0;
}
static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}
/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}
/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);
/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}
static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
	       (unsigned long)arg;
}
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying else where... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)(chunk->key),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}
void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}
static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}
/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		mutex_lock(&audit_cmd_mutex);
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		mutex_unlock(&audit_cmd_mutex);
	}
	return 0;
}
static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}
/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}
static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}
/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}
/*
 *  Here comes the stuff asynchronous to auditctl operations
 */

static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   u32 mask, const void *data, int data_type,
				   const unsigned char *file_name, u32 cookie,
				   struct fsnotify_iter_info *iter_info)
{
	return 0;
}
static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(refcount_read(&entry->refcnt) < 1);
}
static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
	.free_mark = audit_tree_destroy_watch,
};
static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);