Showing error 1045

User: Jiri Slaby
Error type: Leaving function in locked state
Error type description: A lock acquired in the function is not released on every exit path, so the function can return with the lock still held (the lock is leaked)
File location: kernel/audit_tree.c
Line in file: 319
Project: Linux Kernel
Project version: 2.6.28
Tools: Undetermined 1
Entered: 2012-03-04 17:07:06 UTC
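
Illustration of the error class (not taken from the file below; the names
leaves_locked and example_lock are made up for this sketch): the checker
flags a function that can return while still holding a lock it acquired.

    static DEFINE_SPINLOCK(example_lock);

    static void leaves_locked(int flag)
    {
            spin_lock(&example_lock);
            if (flag)
                    return;         /* flagged path: returns with example_lock held */
            spin_unlock(&example_lock);
    }

In the listing below the flagged location, line 319, is the closing brace of
untag_chunk(), which re-acquires hash_lock just before returning; its callers
(prune_one(), trim_marked()) invoke it with hash_lock already held and unlock
afterwards, so whether this is a genuine leak depends on that calling
convention.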


Source:

  1#include "audit.h"
  2#include <linux/inotify.h>
  3#include <linux/namei.h>
  4#include <linux/mount.h>
  5
  6struct audit_tree;
  7struct audit_chunk;
  8
  9struct audit_tree {
 10        atomic_t count;
 11        int goner;
 12        struct audit_chunk *root;
 13        struct list_head chunks;
 14        struct list_head rules;
 15        struct list_head list;
 16        struct list_head same_root;
 17        struct rcu_head head;
 18        char pathname[];
 19};
 20
 21struct audit_chunk {
 22        struct list_head hash;
 23        struct inotify_watch watch;
 24        struct list_head trees;                /* with root here */
 25        int dead;
 26        int count;
 27        atomic_long_t refs;
 28        struct rcu_head head;
 29        struct node {
 30                struct list_head list;
 31                struct audit_tree *owner;
 32                unsigned index;                /* index; upper bit indicates 'will prune' */
 33        } owners[];
 34};
 35
 36static LIST_HEAD(tree_list);
 37static LIST_HEAD(prune_list);
 38
 39/*
 40 * One struct chunk is attached to each inode of interest.
 41 * We replace struct chunk on tagging/untagging.
 42 * Rules have pointer to struct audit_tree.
 43 * Rules have struct list_head rlist forming a list of rules over
 44 * the same tree.
 45 * References to struct chunk are collected at audit_inode{,_child}()
 46 * time and used in AUDIT_TREE rule matching.
 47 * These references are dropped at the same time we are calling
 48 * audit_free_names(), etc.
 49 *
 50 * Cyclic lists galore:
 51 * tree.chunks anchors chunk.owners[].list                        hash_lock
 52 * tree.rules anchors rule.rlist                                audit_filter_mutex
 53 * chunk.trees anchors tree.same_root                                hash_lock
 54 * chunk.hash is a hash with middle bits of watch.inode as
 55 * a hash function.                                                RCU, hash_lock
 56 *
 57 * tree is refcounted; one reference for "some rules on rules_list refer to
 58 * it", one for each chunk with pointer to it.
 59 *
 60 * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
 61 * of watch contributes 1 to .refs).
 62 *
 63 * node.index allows to get from node.list to containing chunk.
 64 * MSB of that sucker is stolen to mark taggings that we might have to
 65 * revert - several operations have very unpleasant cleanup logics and
 66 * that makes a difference.  Some.
 67 */
 68
 69static struct inotify_handle *rtree_ih;
 70
 71static struct audit_tree *alloc_tree(const char *s)
 72{
 73        struct audit_tree *tree;
 74
 75        tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
 76        if (tree) {
 77                atomic_set(&tree->count, 1);
 78                tree->goner = 0;
 79                INIT_LIST_HEAD(&tree->chunks);
 80                INIT_LIST_HEAD(&tree->rules);
 81                INIT_LIST_HEAD(&tree->list);
 82                INIT_LIST_HEAD(&tree->same_root);
 83                tree->root = NULL;
 84                strcpy(tree->pathname, s);
 85        }
 86        return tree;
 87}
 88
 89static inline void get_tree(struct audit_tree *tree)
 90{
 91        atomic_inc(&tree->count);
 92}
 93
 94static void __put_tree(struct rcu_head *rcu)
 95{
 96        struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
 97        kfree(tree);
 98}
 99
100static inline void put_tree(struct audit_tree *tree)
101{
102        if (atomic_dec_and_test(&tree->count))
103                call_rcu(&tree->head, __put_tree);
104}
105
106/* to avoid bringing the entire thing in audit.h */
107const char *audit_tree_path(struct audit_tree *tree)
108{
109        return tree->pathname;
110}
111
112static struct audit_chunk *alloc_chunk(int count)
113{
114        struct audit_chunk *chunk;
115        size_t size;
116        int i;
117
118        size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
119        chunk = kzalloc(size, GFP_KERNEL);
120        if (!chunk)
121                return NULL;
122
123        INIT_LIST_HEAD(&chunk->hash);
124        INIT_LIST_HEAD(&chunk->trees);
125        chunk->count = count;
126        atomic_long_set(&chunk->refs, 1);
127        for (i = 0; i < count; i++) {
128                INIT_LIST_HEAD(&chunk->owners[i].list);
129                chunk->owners[i].index = i;
130        }
131        inotify_init_watch(&chunk->watch);
132        return chunk;
133}
134
135static void free_chunk(struct audit_chunk *chunk)
136{
137        int i;
138
139        for (i = 0; i < chunk->count; i++) {
140                if (chunk->owners[i].owner)
141                        put_tree(chunk->owners[i].owner);
142        }
143        kfree(chunk);
144}
145
146void audit_put_chunk(struct audit_chunk *chunk)
147{
148        if (atomic_long_dec_and_test(&chunk->refs))
149                free_chunk(chunk);
150}
151
152static void __put_chunk(struct rcu_head *rcu)
153{
154        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
155        audit_put_chunk(chunk);
156}
157
158enum {HASH_SIZE = 128};
159static struct list_head chunk_hash_heads[HASH_SIZE];
160static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
161
162static inline struct list_head *chunk_hash(const struct inode *inode)
163{
164        unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
165        return chunk_hash_heads + n % HASH_SIZE;
166}
167
168/* hash_lock is held by caller */
169static void insert_hash(struct audit_chunk *chunk)
170{
171        struct list_head *list = chunk_hash(chunk->watch.inode);
172        list_add_rcu(&chunk->hash, list);
173}
174
175/* called under rcu_read_lock */
176struct audit_chunk *audit_tree_lookup(const struct inode *inode)
177{
178        struct list_head *list = chunk_hash(inode);
179        struct audit_chunk *p;
180
181        list_for_each_entry_rcu(p, list, hash) {
182                if (p->watch.inode == inode) {
183                        atomic_long_inc(&p->refs);
184                        return p;
185                }
186        }
187        return NULL;
188}
189
190int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
191{
192        int n;
193        for (n = 0; n < chunk->count; n++)
194                if (chunk->owners[n].owner == tree)
195                        return 1;
196        return 0;
197}
198
199/* tagging and untagging inodes with trees */
200
201static struct audit_chunk *find_chunk(struct node *p)
202{
203        int index = p->index & ~(1U<<31);
204        p -= index;
205        return container_of(p, struct audit_chunk, owners[0]);
206}
207
208static void untag_chunk(struct node *p)
209{
210        struct audit_chunk *chunk = find_chunk(p);
211        struct audit_chunk *new;
212        struct audit_tree *owner;
213        int size = chunk->count - 1;
214        int i, j;
215
216        if (!pin_inotify_watch(&chunk->watch)) {
217                /*
218                 * Filesystem is shutting down; all watches are getting
219                 * evicted, just take it off the node list for this
220                 * tree and let the eviction logics take care of the
221                 * rest.
222                 */
223                owner = p->owner;
224                if (owner->root == chunk) {
225                        list_del_init(&owner->same_root);
226                        owner->root = NULL;
227                }
228                list_del_init(&p->list);
229                p->owner = NULL;
230                put_tree(owner);
231                return;
232        }
233
234        spin_unlock(&hash_lock);
235
236        /*
237         * pin_inotify_watch() succeeded, so the watch won't go away
238         * from under us.
239         */
240        mutex_lock(&chunk->watch.inode->inotify_mutex);
241        if (chunk->dead) {
242                mutex_unlock(&chunk->watch.inode->inotify_mutex);
243                goto out;
244        }
245
246        owner = p->owner;
247
248        if (!size) {
249                chunk->dead = 1;
250                spin_lock(&hash_lock);
251                list_del_init(&chunk->trees);
252                if (owner->root == chunk)
253                        owner->root = NULL;
254                list_del_init(&p->list);
255                list_del_rcu(&chunk->hash);
256                spin_unlock(&hash_lock);
257                inotify_evict_watch(&chunk->watch);
258                mutex_unlock(&chunk->watch.inode->inotify_mutex);
259                put_inotify_watch(&chunk->watch);
260                goto out;
261        }
262
263        new = alloc_chunk(size);
264        if (!new)
265                goto Fallback;
266        if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
267                free_chunk(new);
268                goto Fallback;
269        }
270
271        chunk->dead = 1;
272        spin_lock(&hash_lock);
273        list_replace_init(&chunk->trees, &new->trees);
274        if (owner->root == chunk) {
275                list_del_init(&owner->same_root);
276                owner->root = NULL;
277        }
278
279        for (i = j = 0; i < size; i++, j++) {
280                struct audit_tree *s;
281                if (&chunk->owners[j] == p) {
282                        list_del_init(&p->list);
283                        i--;
284                        continue;
285                }
286                s = chunk->owners[j].owner;
287                new->owners[i].owner = s;
288                new->owners[i].index = chunk->owners[j].index - j + i;
289                if (!s) /* result of earlier fallback */
290                        continue;
291                get_tree(s);
292                list_replace_init(&chunk->owners[i].list, &new->owners[j].list);
293        }
294
295        list_replace_rcu(&chunk->hash, &new->hash);
296        list_for_each_entry(owner, &new->trees, same_root)
297                owner->root = new;
298        spin_unlock(&hash_lock);
299        inotify_evict_watch(&chunk->watch);
300        mutex_unlock(&chunk->watch.inode->inotify_mutex);
301        put_inotify_watch(&chunk->watch);
302        goto out;
303
304Fallback:
305        // do the best we can
306        spin_lock(&hash_lock);
307        if (owner->root == chunk) {
308                list_del_init(&owner->same_root);
309                owner->root = NULL;
310        }
311        list_del_init(&p->list);
312        p->owner = NULL;
313        put_tree(owner);
314        spin_unlock(&hash_lock);
315        mutex_unlock(&chunk->watch.inode->inotify_mutex);
316out:
317        unpin_inotify_watch(&chunk->watch);
318        spin_lock(&hash_lock);
319}
320
321static int create_chunk(struct inode *inode, struct audit_tree *tree)
322{
323        struct audit_chunk *chunk = alloc_chunk(1);
324        if (!chunk)
325                return -ENOMEM;
326
327        if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
328                free_chunk(chunk);
329                return -ENOSPC;
330        }
331
332        mutex_lock(&inode->inotify_mutex);
333        spin_lock(&hash_lock);
334        if (tree->goner) {
335                spin_unlock(&hash_lock);
336                chunk->dead = 1;
337                inotify_evict_watch(&chunk->watch);
338                mutex_unlock(&inode->inotify_mutex);
339                put_inotify_watch(&chunk->watch);
340                return 0;
341        }
342        chunk->owners[0].index = (1U << 31);
343        chunk->owners[0].owner = tree;
344        get_tree(tree);
345        list_add(&chunk->owners[0].list, &tree->chunks);
346        if (!tree->root) {
347                tree->root = chunk;
348                list_add(&tree->same_root, &chunk->trees);
349        }
350        insert_hash(chunk);
351        spin_unlock(&hash_lock);
352        mutex_unlock(&inode->inotify_mutex);
353        return 0;
354}
355
356/* the first tagged inode becomes root of tree */
357static int tag_chunk(struct inode *inode, struct audit_tree *tree)
358{
359        struct inotify_watch *watch;
360        struct audit_tree *owner;
361        struct audit_chunk *chunk, *old;
362        struct node *p;
363        int n;
364
365        if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
366                return create_chunk(inode, tree);
367
368        old = container_of(watch, struct audit_chunk, watch);
369
370        /* are we already there? */
371        spin_lock(&hash_lock);
372        for (n = 0; n < old->count; n++) {
373                if (old->owners[n].owner == tree) {
374                        spin_unlock(&hash_lock);
375                        put_inotify_watch(watch);
376                        return 0;
377                }
378        }
379        spin_unlock(&hash_lock);
380
381        chunk = alloc_chunk(old->count + 1);
382        if (!chunk)
383                return -ENOMEM;
384
385        mutex_lock(&inode->inotify_mutex);
386        if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
387                mutex_unlock(&inode->inotify_mutex);
388                free_chunk(chunk);
389                return -ENOSPC;
390        }
391        spin_lock(&hash_lock);
392        if (tree->goner) {
393                spin_unlock(&hash_lock);
394                chunk->dead = 1;
395                inotify_evict_watch(&chunk->watch);
396                mutex_unlock(&inode->inotify_mutex);
397                put_inotify_watch(&chunk->watch);
398                return 0;
399        }
400        list_replace_init(&old->trees, &chunk->trees);
401        for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
402                struct audit_tree *s = old->owners[n].owner;
403                p->owner = s;
404                p->index = old->owners[n].index;
405                if (!s) /* result of fallback in untag */
406                        continue;
407                get_tree(s);
408                list_replace_init(&old->owners[n].list, &p->list);
409        }
410        p->index = (chunk->count - 1) | (1U<<31);
411        p->owner = tree;
412        get_tree(tree);
413        list_add(&p->list, &tree->chunks);
414        list_replace_rcu(&old->hash, &chunk->hash);
415        list_for_each_entry(owner, &chunk->trees, same_root)
416                owner->root = chunk;
417        old->dead = 1;
418        if (!tree->root) {
419                tree->root = chunk;
420                list_add(&tree->same_root, &chunk->trees);
421        }
422        spin_unlock(&hash_lock);
423        inotify_evict_watch(&old->watch);
424        mutex_unlock(&inode->inotify_mutex);
425        put_inotify_watch(&old->watch);
426        return 0;
427}
428
429static void kill_rules(struct audit_tree *tree)
430{
431        struct audit_krule *rule, *next;
432        struct audit_entry *entry;
433        struct audit_buffer *ab;
434
435        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
436                entry = container_of(rule, struct audit_entry, rule);
437
438                list_del_init(&rule->rlist);
439                if (rule->tree) {
440                        /* not a half-baked one */
441                        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
442                        audit_log_format(ab, "op=remove rule dir=");
443                        audit_log_untrustedstring(ab, rule->tree->pathname);
444                        if (rule->filterkey) {
445                                audit_log_format(ab, " key=");
446                                audit_log_untrustedstring(ab, rule->filterkey);
447                        } else
448                                audit_log_format(ab, " key=(null)");
449                        audit_log_format(ab, " list=%d res=1", rule->listnr);
450                        audit_log_end(ab);
451                        rule->tree = NULL;
452                        list_del_rcu(&entry->list);
453                        call_rcu(&entry->rcu, audit_free_rule_rcu);
454                }
455        }
456}
457
458/*
459 * finish killing struct audit_tree
460 */
461static void prune_one(struct audit_tree *victim)
462{
463        spin_lock(&hash_lock);
464        while (!list_empty(&victim->chunks)) {
465                struct node *p;
466
467                p = list_entry(victim->chunks.next, struct node, list);
468
469                untag_chunk(p);
470        }
471        spin_unlock(&hash_lock);
472        put_tree(victim);
473}
474
475/* trim the uncommitted chunks from tree */
476
477static void trim_marked(struct audit_tree *tree)
478{
479        struct list_head *p, *q;
480        spin_lock(&hash_lock);
481        if (tree->goner) {
482                spin_unlock(&hash_lock);
483                return;
484        }
485        /* reorder */
486        for (p = tree->chunks.next; p != &tree->chunks; p = q) {
487                struct node *node = list_entry(p, struct node, list);
488                q = p->next;
489                if (node->index & (1U<<31)) {
490                        list_del_init(p);
491                        list_add(p, &tree->chunks);
492                }
493        }
494
495        while (!list_empty(&tree->chunks)) {
496                struct node *node;
497
498                node = list_entry(tree->chunks.next, struct node, list);
499
500                /* have we run out of marked? */
501                if (!(node->index & (1U<<31)))
502                        break;
503
504                untag_chunk(node);
505        }
506        if (!tree->root && !tree->goner) {
507                tree->goner = 1;
508                spin_unlock(&hash_lock);
509                mutex_lock(&audit_filter_mutex);
510                kill_rules(tree);
511                list_del_init(&tree->list);
512                mutex_unlock(&audit_filter_mutex);
513                prune_one(tree);
514        } else {
515                spin_unlock(&hash_lock);
516        }
517}
518
519/* called with audit_filter_mutex */
520int audit_remove_tree_rule(struct audit_krule *rule)
521{
522        struct audit_tree *tree;
523        tree = rule->tree;
524        if (tree) {
525                spin_lock(&hash_lock);
526                list_del_init(&rule->rlist);
527                if (list_empty(&tree->rules) && !tree->goner) {
528                        tree->root = NULL;
529                        list_del_init(&tree->same_root);
530                        tree->goner = 1;
531                        list_move(&tree->list, &prune_list);
532                        rule->tree = NULL;
533                        spin_unlock(&hash_lock);
534                        audit_schedule_prune();
535                        return 1;
536                }
537                rule->tree = NULL;
538                spin_unlock(&hash_lock);
539                return 1;
540        }
541        return 0;
542}
543
544void audit_trim_trees(void)
545{
546        struct list_head cursor;
547
548        mutex_lock(&audit_filter_mutex);
549        list_add(&cursor, &tree_list);
550        while (cursor.next != &tree_list) {
551                struct audit_tree *tree;
552                struct path path;
553                struct vfsmount *root_mnt;
554                struct node *node;
555                struct list_head list;
556                int err;
557
558                tree = container_of(cursor.next, struct audit_tree, list);
559                get_tree(tree);
560                list_del(&cursor);
561                list_add(&cursor, &tree->list);
562                mutex_unlock(&audit_filter_mutex);
563
564                err = kern_path(tree->pathname, 0, &path);
565                if (err)
566                        goto skip_it;
567
568                root_mnt = collect_mounts(path.mnt, path.dentry);
569                path_put(&path);
570                if (!root_mnt)
571                        goto skip_it;
572
573                list_add_tail(&list, &root_mnt->mnt_list);
574                spin_lock(&hash_lock);
575                list_for_each_entry(node, &tree->chunks, list) {
576                        struct audit_chunk *chunk = find_chunk(node);
577                        struct inode *inode = chunk->watch.inode;
578                        struct vfsmount *mnt;
579                        node->index |= 1U<<31;
580                        list_for_each_entry(mnt, &list, mnt_list) {
581                                if (mnt->mnt_root->d_inode == inode) {
582                                        node->index &= ~(1U<<31);
583                                        break;
584                                }
585                        }
586                }
587                spin_unlock(&hash_lock);
588                trim_marked(tree);
589                put_tree(tree);
590                list_del_init(&list);
591                drop_collected_mounts(root_mnt);
592skip_it:
593                mutex_lock(&audit_filter_mutex);
594        }
595        list_del(&cursor);
596        mutex_unlock(&audit_filter_mutex);
597}
598
599static int is_under(struct vfsmount *mnt, struct dentry *dentry,
600                    struct path *path)
601{
602        if (mnt != path->mnt) {
603                for (;;) {
604                        if (mnt->mnt_parent == mnt)
605                                return 0;
606                        if (mnt->mnt_parent == path->mnt)
607                                        break;
608                        mnt = mnt->mnt_parent;
609                }
610                dentry = mnt->mnt_mountpoint;
611        }
612        return is_subdir(dentry, path->dentry);
613}
614
615int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
616{
617
618        if (pathname[0] != '/' ||
619            rule->listnr != AUDIT_FILTER_EXIT ||
620            op & ~AUDIT_EQUAL ||
621            rule->inode_f || rule->watch || rule->tree)
622                return -EINVAL;
623        rule->tree = alloc_tree(pathname);
624        if (!rule->tree)
625                return -ENOMEM;
626        return 0;
627}
628
629void audit_put_tree(struct audit_tree *tree)
630{
631        put_tree(tree);
632}
633
634/* called with audit_filter_mutex */
635int audit_add_tree_rule(struct audit_krule *rule)
636{
637        struct audit_tree *seed = rule->tree, *tree;
638        struct path path;
639        struct vfsmount *mnt, *p;
640        struct list_head list;
641        int err;
642
643        list_for_each_entry(tree, &tree_list, list) {
644                if (!strcmp(seed->pathname, tree->pathname)) {
645                        put_tree(seed);
646                        rule->tree = tree;
647                        list_add(&rule->rlist, &tree->rules);
648                        return 0;
649                }
650        }
651        tree = seed;
652        list_add(&tree->list, &tree_list);
653        list_add(&rule->rlist, &tree->rules);
654        /* do not set rule->tree yet */
655        mutex_unlock(&audit_filter_mutex);
656
657        err = kern_path(tree->pathname, 0, &path);
658        if (err)
659                goto Err;
660        mnt = collect_mounts(path.mnt, path.dentry);
661        path_put(&path);
662        if (!mnt) {
663                err = -ENOMEM;
664                goto Err;
665        }
666        list_add_tail(&list, &mnt->mnt_list);
667
668        get_tree(tree);
669        list_for_each_entry(p, &list, mnt_list) {
670                err = tag_chunk(p->mnt_root->d_inode, tree);
671                if (err)
672                        break;
673        }
674
675        list_del(&list);
676        drop_collected_mounts(mnt);
677
678        if (!err) {
679                struct node *node;
680                spin_lock(&hash_lock);
681                list_for_each_entry(node, &tree->chunks, list)
682                        node->index &= ~(1U<<31);
683                spin_unlock(&hash_lock);
684        } else {
685                trim_marked(tree);
686                goto Err;
687        }
688
689        mutex_lock(&audit_filter_mutex);
690        if (list_empty(&rule->rlist)) {
691                put_tree(tree);
692                return -ENOENT;
693        }
694        rule->tree = tree;
695        put_tree(tree);
696
697        return 0;
698Err:
699        mutex_lock(&audit_filter_mutex);
700        list_del_init(&tree->list);
701        list_del_init(&tree->rules);
702        put_tree(tree);
703        return err;
704}
705
706int audit_tag_tree(char *old, char *new)
707{
708        struct list_head cursor, barrier;
709        int failed = 0;
710        struct path path;
711        struct vfsmount *tagged;
712        struct list_head list;
713        struct vfsmount *mnt;
714        struct dentry *dentry;
715        int err;
716
717        err = kern_path(new, 0, &path);
718        if (err)
719                return err;
720        tagged = collect_mounts(path.mnt, path.dentry);
721        path_put(&path);
722        if (!tagged)
723                return -ENOMEM;
724
725        err = kern_path(old, 0, &path);
726        if (err) {
727                drop_collected_mounts(tagged);
728                return err;
729        }
730        mnt = mntget(path.mnt);
731        dentry = dget(path.dentry);
732        path_put(&path);
733
734        if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
735                follow_up(&mnt, &dentry);
736
737        list_add_tail(&list, &tagged->mnt_list);
738
739        mutex_lock(&audit_filter_mutex);
740        list_add(&barrier, &tree_list);
741        list_add(&cursor, &barrier);
742
743        while (cursor.next != &tree_list) {
744                struct audit_tree *tree;
745                struct vfsmount *p;
746
747                tree = container_of(cursor.next, struct audit_tree, list);
748                get_tree(tree);
749                list_del(&cursor);
750                list_add(&cursor, &tree->list);
751                mutex_unlock(&audit_filter_mutex);
752
753                err = kern_path(tree->pathname, 0, &path);
754                if (err) {
755                        put_tree(tree);
756                        mutex_lock(&audit_filter_mutex);
757                        continue;
758                }
759
760                spin_lock(&vfsmount_lock);
761                if (!is_under(mnt, dentry, &path)) {
762                        spin_unlock(&vfsmount_lock);
763                        path_put(&path);
764                        put_tree(tree);
765                        mutex_lock(&audit_filter_mutex);
766                        continue;
767                }
768                spin_unlock(&vfsmount_lock);
769                path_put(&path);
770
771                list_for_each_entry(p, &list, mnt_list) {
772                        failed = tag_chunk(p->mnt_root->d_inode, tree);
773                        if (failed)
774                                break;
775                }
776
777                if (failed) {
778                        put_tree(tree);
779                        mutex_lock(&audit_filter_mutex);
780                        break;
781                }
782
783                mutex_lock(&audit_filter_mutex);
784                spin_lock(&hash_lock);
785                if (!tree->goner) {
786                        list_del(&tree->list);
787                        list_add(&tree->list, &tree_list);
788                }
789                spin_unlock(&hash_lock);
790                put_tree(tree);
791        }
792
793        while (barrier.prev != &tree_list) {
794                struct audit_tree *tree;
795
796                tree = container_of(barrier.prev, struct audit_tree, list);
797                get_tree(tree);
798                list_del(&tree->list);
799                list_add(&tree->list, &barrier);
800                mutex_unlock(&audit_filter_mutex);
801
802                if (!failed) {
803                        struct node *node;
804                        spin_lock(&hash_lock);
805                        list_for_each_entry(node, &tree->chunks, list)
806                                node->index &= ~(1U<<31);
807                        spin_unlock(&hash_lock);
808                } else {
809                        trim_marked(tree);
810                }
811
812                put_tree(tree);
813                mutex_lock(&audit_filter_mutex);
814        }
815        list_del(&barrier);
816        list_del(&cursor);
817        list_del(&list);
818        mutex_unlock(&audit_filter_mutex);
819        dput(dentry);
820        mntput(mnt);
821        drop_collected_mounts(tagged);
822        return failed;
823}
824
825/*
826 * That gets run when evict_chunk() ends up needing to kill audit_tree.
827 * Runs from a separate thread, with audit_cmd_mutex held.
828 */
829void audit_prune_trees(void)
830{
831        mutex_lock(&audit_filter_mutex);
832
833        while (!list_empty(&prune_list)) {
834                struct audit_tree *victim;
835
836                victim = list_entry(prune_list.next, struct audit_tree, list);
837                list_del_init(&victim->list);
838
839                mutex_unlock(&audit_filter_mutex);
840
841                prune_one(victim);
842
843                mutex_lock(&audit_filter_mutex);
844        }
845
846        mutex_unlock(&audit_filter_mutex);
847}
848
849/*
850 *  Here comes the stuff asynchronous to auditctl operations
851 */
852
853/* inode->inotify_mutex is locked */
854static void evict_chunk(struct audit_chunk *chunk)
855{
856        struct audit_tree *owner;
857        int n;
858
859        if (chunk->dead)
860                return;
861
862        chunk->dead = 1;
863        mutex_lock(&audit_filter_mutex);
864        spin_lock(&hash_lock);
865        while (!list_empty(&chunk->trees)) {
866                owner = list_entry(chunk->trees.next,
867                                   struct audit_tree, same_root);
868                owner->goner = 1;
869                owner->root = NULL;
870                list_del_init(&owner->same_root);
871                spin_unlock(&hash_lock);
872                kill_rules(owner);
873                list_move(&owner->list, &prune_list);
874                audit_schedule_prune();
875                spin_lock(&hash_lock);
876        }
877        list_del_rcu(&chunk->hash);
878        for (n = 0; n < chunk->count; n++)
879                list_del_init(&chunk->owners[n].list);
880        spin_unlock(&hash_lock);
881        mutex_unlock(&audit_filter_mutex);
882}
883
884static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
885                         u32 cookie, const char *dname, struct inode *inode)
886{
887        struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
888
889        if (mask & IN_IGNORED) {
890                evict_chunk(chunk);
891                put_inotify_watch(watch);
892        }
893}
894
895static void destroy_watch(struct inotify_watch *watch)
896{
897        struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
898        call_rcu(&chunk->head, __put_chunk);
899}
900
901static const struct inotify_operations rtree_inotify_ops = {
902        .handle_event        = handle_event,
903        .destroy_watch        = destroy_watch,
904};
905
906static int __init audit_tree_init(void)
907{
908        int i;
909
910        rtree_ih = inotify_init(&rtree_inotify_ops);
911        if (IS_ERR(rtree_ih))
912                audit_panic("cannot initialize inotify handle for rectree watches");
913
914        for (i = 0; i < HASH_SIZE; i++)
915                INIT_LIST_HEAD(&chunk_hash_heads[i]);
916
917        return 0;
918}
919__initcall(audit_tree_init);