#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>
struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};
struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};
static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;
/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * The MSB of that field is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */
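
/*
 * Illustrative sketch of the node.index trick described above
 * (hypothetical, userspace-style code, excluded from the build): the low
 * bits of index hold the slot in owners[], the stolen MSB flags "will
 * prune", and masking the MSB off is enough to walk from a node back to
 * its containing chunk, exactly as find_chunk() below does.
 */
#if 0
#include <stddef.h>
#include <stdio.h>

struct ex_node { unsigned index; };
struct ex_chunk { int count; struct ex_node owners[4]; };

int main(void)
{
	struct ex_chunk c = { .count = 4 };
	unsigned i;

	for (i = 0; i < 4; i++)
		c.owners[i].index = i;

	c.owners[2].index |= 1U << 31;			/* mark slot 2 "will prune" */

	struct ex_node *p = &c.owners[2];
	unsigned index = p->index & ~(1U << 31);	/* recover the slot */
	struct ex_chunk *found = (struct ex_chunk *)
		((char *)(p - index) - offsetof(struct ex_chunk, owners[0]));

	printf("recovered chunk? %s\n", found == &c ? "yes" : "no");
	return 0;
}
#endif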
static struct fsnotify_group *audit_tree_group;
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}
static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}
static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}
/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}
void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}
static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}
static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
static inline struct list_head *chunk_hash(const struct inode *inode)
{
	/* use middle bits of the inode address as the hash */
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}
/* hash_lock & entry->lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct fsnotify_mark *entry = &chunk->mark;
	struct list_head *list;

	if (!entry->inode)
		return;
	list = chunk_hash(entry->inode);
	list_add_rcu(&chunk->hash, list);
}
/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/* mark.inode may have gone NULL, but who cares? */
		if (p->mark.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}
bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}
/* tagging and untagging inodes with trees */

static struct audit_chunk *find_chunk(struct node *p)
{
	/* strip the 'will prune' bit to recover the array index */
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	spin_lock(&entry->lock);
	if (chunk->dead || !entry->inode) {
		spin_unlock(&entry->lock);
		if (new)
			free_chunk(new);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	fsnotify_duplicate_mark(&new->mark, entry);
	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode,
			      NULL, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	spin_lock(&old_entry->lock);
	if (!old_entry->inode) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(old_entry);
		free_chunk(chunk);
		return -ENOENT;
	}

	fsnotify_duplicate_mark(chunk_entry, old_entry);
	if (fsnotify_add_mark(chunk_entry, chunk_entry->group,
			      chunk_entry->inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/* even though we hold old_entry->lock, this is safe since
	 * chunk_entry->lock could NEVER have been grabbed before */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry);	/* pair to fsnotify_find mark_entry */
	return 0;
}
static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}
/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}
/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder: put the marked chunks at the head of the list */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}
static void audit_schedule_prune(void);
/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}
static int compare_root(struct vfsmount *mnt, void *arg)
{
	return d_backing_inode(mnt->mnt_root) == arg;
}
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			struct inode *inode = chunk->mark.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}
void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}
static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}
/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		mutex_lock(&audit_cmd_mutex);
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		mutex_unlock(&audit_cmd_mutex);
	}
	return 0;
}
static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}
/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}
static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}
/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}
/*
 * Here comes the stuff asynchronous to auditctl operations
 */

static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   u32 mask, void *data, int data_type,
				   const unsigned char *file_name, u32 cookie)
{
	return 0;
}
static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(atomic_read(&entry->refcnt) < 1);
}
static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
};
static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);