/* kernel/audit_tree.c */
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows getting from node.list to the containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */

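/*
 * Example: a recursive watch installed from userspace with something
 * like "auditctl -a exit,always -F dir=/etc -F key=cfg" (syntax
 * illustrative) ends up allocating one audit_tree for "/etc";
 * tag_chunk() then attaches an audit_chunk to the root inode of every
 * mount collected under that path.
 */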
static struct fsnotify_group *audit_tree_group;

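/* Allocate a tree; the pathname lands in the trailing flexible array. */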
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

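/*
 * Hash by inode address; the low bits carry no information (inodes are
 * at least cache-line aligned), so divide them out before folding the
 * rest into HASH_SIZE buckets.
 */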
static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct fsnotify_mark *entry = &chunk->mark;
	struct list_head *list;

	if (!entry->i.inode)
		return;
	list = chunk_hash(entry->i.inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/* mark.inode may have gone NULL, but who cares? */
		if (p->mark.i.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */

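/*
 * Recover the containing chunk from a node: the upper bit of
 * node.index is the "will prune" tag, the rest is the node's position
 * in owners[], so stepping back that many entries lands on owners[0].
 */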
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

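/*
 * Drop one owner from a chunk: replace the chunk with a copy one entry
 * smaller (or remove it entirely when this was the last owner), keeping
 * the hash and the surviving owners' lists consistent under hash_lock.
 */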
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	spin_lock(&entry->lock);
	if (chunk->dead || !entry->i.inode) {
		spin_unlock(&entry->lock);
		if (new)
			free_chunk(new);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	fsnotify_duplicate_mark(&new->mark, entry);
	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	/* do the best we can */
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

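/*
 * Attach a brand-new chunk, with this tree as its sole owner, to an
 * inode that carried no chunk before.
 */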
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

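/*
 * Add a tree as an owner of an inode's chunk: copy the existing chunk
 * (if any) into one that is an entry larger and swap the two, all under
 * hash_lock and both marks' locks.
 */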
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	spin_lock(&old_entry->lock);
	if (!old_entry->i.inode) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(old_entry);
		free_chunk(chunk);
		return -ENOENT;
	}

	fsnotify_duplicate_mark(chunk_entry, old_entry);
	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry);	/* pair to fsnotify_find_inode_mark */
	return 0;
}

static void audit_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=");
	audit_log_string(ab, "remove rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

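/*
 * Detach and free (via RCU) every rule hanging off a dying tree,
 * logging a CONFIG_CHANGE record for each fully initialized one.
 */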
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_log_remove_rule(rule);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder: move the marked ("will prune") chunks to the front */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return mnt->mnt_root->d_inode == arg;
}

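/*
 * Walk all trees and drop the chunks that no longer correspond to a
 * mount under the tree's path.  The cursor dance lets us release
 * audit_filter_mutex while working on one tree without losing our
 * place in tree_list.
 */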
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			struct inode *inode = chunk->mark.i.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(mnt->mnt_root->d_inode, arg);
}

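/*
 * Attach a new rule's tree: reuse an existing audit_tree with the same
 * pathname if there is one, otherwise tag every mount under the path.
 * audit_filter_mutex is dropped while walking the mounts, so the rule
 * may be removed underneath us; the rlist emptiness check below is
 * what catches that.
 */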
/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

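/*
 * Tag every tree whose path lies under "old" with the mount set
 * collected at "new" (reached from the AUDIT_MAKE_EQUIV command).
 * A cursor/barrier pair keeps our place in tree_list while
 * audit_filter_mutex is dropped in between.
 */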
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(&prune_list)) {
		struct audit_tree *victim;

		victim = list_entry(prune_list.next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
	return 0;
}

static void audit_schedule_prune(void)
{
	kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	if (need_prune)
		audit_schedule_prune();
	mutex_unlock(&audit_filter_mutex);
}

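/*
 * The marks are never registered with any event mask, so handle_event
 * and should_send_event exist only to satisfy the fsnotify_ops
 * contract; the real work happens in freeing_mark when the watched
 * inode goes away.
 */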
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   struct fsnotify_event *event)
{
	BUG();
	return -EOPNOTSUPP;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(atomic_read(&entry->refcnt) < 1);
}

static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
				  struct fsnotify_mark *inode_mark,
				  struct fsnotify_mark *vfsmount_mark,
				  __u32 mask, void *data, int data_type)
{
	return false;
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.should_send_event = audit_tree_send_event,
	.free_group_priv = NULL,
	.free_event_priv = NULL,
	.freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);