#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * The MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference. Some.
 */

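/*
 * Illustration (added, not in the original source): the node sitting in
 * chunk->owners[2] has node->index == 2, possibly with the top bit set if
 * the tagging is marked 'will prune'; find_chunk() below masks that bit
 * off and steps back two slots to reach owners[0], hence the chunk.
 */
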
static struct fsnotify_group *audit_tree_group;

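/* Allocate a tree with refcount 1; the watched path is copied in-line. */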
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

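/* Release the tree references held by live owner slots, then free the chunk. */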
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

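/*
 * Allocate a chunk with @count owner slots; slot i starts out with
 * index == i, the invariant find_chunk() depends on.
 */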
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	return (unsigned long)inode;
}

/*
 * Function to return search key in our hash from chunk. Key 0 is special and
 * should never be present in the hash.
 */
static unsigned long chunk_to_key(struct audit_chunk *chunk)
{
	return (unsigned long)chunk->mark.inode;
}

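/*
 * The key is an inode pointer; dividing by L1_CACHE_BYTES discards the
 * low-order bits, which carry little entropy for heap objects, before
 * reducing modulo HASH_SIZE.
 */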
static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	unsigned long key = chunk_to_key(chunk);
	struct list_head *list;

	if (!key)
		return;
	list = chunk_hash(key);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (chunk_to_key(p) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

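/*
 * Recover the chunk enclosing an owner slot: strip the 'will prune' bit
 * from node->index to get the slot's position, step back to owners[0],
 * and take container_of() from there.
 */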
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

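/*
 * Detach owner slot @p from its chunk. Chunks are never resized in place:
 * a replacement with one slot fewer is hashed in, and a chunk left with
 * no owners is destroyed along with its fsnotify mark. Called with
 * hash_lock held; drops and retakes it.
 */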
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	mutex_lock(&entry->group->mark_mutex);
	spin_lock(&entry->lock);
	if (chunk->dead || !entry->inode) {
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		if (new)
			free_chunk(new);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	if (fsnotify_add_mark_locked(&new->mark, entry->group, entry->inode,
				     NULL, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

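/* Set up a fresh single-owner chunk on an inode that has no mark yet. */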
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	mutex_lock(&old_entry->group->mark_mutex);
	spin_lock(&old_entry->lock);
	if (!old_entry->inode) {
		/* old_entry is being shot, lets just lie */
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(old_entry);
		free_chunk(chunk);
		return -ENOENT;
	}

	if (fsnotify_add_mark_locked(chunk_entry, old_entry->group,
				     old_entry->inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/*
	 * Even though we hold old_entry->lock, this is safe since
	 * chunk_entry->lock could NEVER have been grabbed before.
	 */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	mutex_unlock(&old_entry->group->mark_mutex);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry);	/* pair to fsnotify_find mark_entry */
	return 0;
}

static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

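/*
 * Detach all rules from @tree, logging a CONFIG_CHANGE record for each
 * fully initialized one. Called with audit_filter_mutex held.
 */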
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
	       (unsigned long)arg;
}

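/*
 * Re-resolve each tree's path, mark all of its chunks 'will prune', clear
 * the mark on those that still match a mount root under that path, and
 * trim the rest.
 */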
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)chunk_to_key(chunk),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{

	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		mutex_lock(&audit_cmd_mutex);
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		mutex_unlock(&audit_cmd_mutex);
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

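/*
 * Used when a watched subtree is moved from @old to @new: every tree whose
 * path covers @old gets the mounts collected at @new tagged as well.
 */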
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}


static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

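/*
 * The watched inode is going away. Detach the chunk from all its trees;
 * orphaned trees are queued for the prune thread, or handed to the
 * syscall-exit path when audit_killed_trees() supplies a list.
 */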
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

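/*
 * Events themselves are ignored; the mark exists only so that
 * freeing_mark() runs when the watched inode is evicted.
 */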
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   u32 mask, const void *data, int data_type,
				   const unsigned char *file_name, u32 cookie)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(atomic_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);