kernel/tracepoint.c
/*
 * Copyright (C) 2008 Mathieu Desnoyers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/static_key.h>

extern struct tracepoint * const __start___tracepoints_ptrs[];
extern struct tracepoint * const __stop___tracepoints_ptrs[];

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

/*
 * Tracepoints mutex protects the builtin and module tracepoints and the hash
 * table, as well as the local module list.
 */
static DEFINE_MUTEX(tracepoints_mutex);

#ifdef CONFIG_MODULES
/* Local list of struct module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * Tracepoint hash table, containing the active tracepoints.
 * Protected by tracepoints_mutex.
 */
#define TRACEPOINT_HASH_BITS 6
#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];

/*
 * Note about RCU: it is used to delay the freeing of multiple probes arrays
 * until a quiescent state is reached.
 * Tracepoint entry modifications are protected by the tracepoints_mutex.
 */
struct tracepoint_entry {
	struct hlist_node hlist;
	struct tracepoint_func *funcs;
	int refcount;	/* Number of times armed. 0 if disarmed. */
	char name[0];
};

struct tp_probes {
	union {
		struct rcu_head rcu;
		struct list_head list;
	} u;
	struct tracepoint_func probes[0];
};
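
/*
 * Note: allocate_probes() below returns a pointer to the probes[]
 * flexible array rather than to the enclosing struct tp_probes, so
 * callers only ever handle a NULL-terminated tracepoint_func array.
 * release_probes() recovers the enclosing header with container_of()
 * to reuse its rcu_head for RCU-deferred freeing.
 */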

static inline void *allocate_probes(int count)
{
	struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func)
			+ sizeof(struct tp_probes), GFP_KERNEL);
	return p == NULL ? NULL : p->probes;
}

static void rcu_free_old_probes(struct rcu_head *head)
{
	kfree(container_of(head, struct tp_probes, u.rcu));
}

static inline void release_probes(struct tracepoint_func *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);
		call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
	}
}

static void debug_print_probes(struct tracepoint_entry *entry)
{
	int i;

	if (!tracepoint_debug || !entry->funcs)
		return;

	for (i = 0; entry->funcs[i].func; i++)
		printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func);
}

static struct tracepoint_func *
tracepoint_entry_add_probe(struct tracepoint_entry *entry,
			   void *probe, void *data)
{
	int nr_probes = 0;
	struct tracepoint_func *old, *new;

	WARN_ON(!probe);

	debug_print_probes(entry);
	old = entry->funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++)
			if (old[nr_probes].func == probe &&
			    old[nr_probes].data == data)
				return ERR_PTR(-EEXIST);
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old)
		memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
	new[nr_probes].func = probe;
	new[nr_probes].data = data;
	new[nr_probes + 1].func = NULL;
	entry->refcount = nr_probes + 1;
	entry->funcs = new;
	debug_print_probes(entry);
	return old;
}

static void *
tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
			      void *probe, void *data)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = entry->funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(entry);
	/* (N -> M), (N > 1, M >= 0) probes */
	for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
		if (!probe ||
		    (old[nr_probes].func == probe &&
		     old[nr_probes].data == data))
			nr_del++;
	}

	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		entry->funcs = NULL;
		entry->refcount = 0;
		debug_print_probes(entry);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new == NULL)
			return ERR_PTR(-ENOMEM);
		for (i = 0; old[i].func; i++)
			if (probe &&
			    (old[i].func != probe || old[i].data != data))
				new[j++] = old[i];
		new[nr_probes - nr_del].func = NULL;
		entry->refcount = nr_probes - nr_del;
		entry->funcs = new;
	}
	debug_print_probes(entry);
	return old;
}

/*
 * Get tracepoint if the tracepoint is present in the tracepoint hash table.
 * Must be called with tracepoints_mutex held.
 * Returns NULL if not present.
 */
static struct tracepoint_entry *get_tracepoint(const char *name)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tracepoint_entry *e;
	u32 hash = jhash(name, strlen(name), 0);

	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name))
			return e;
	}
	return NULL;
}

/*
 * Add the tracepoint to the tracepoint hash table. Must be called with
 * tracepoints_mutex held.
 */
static struct tracepoint_entry *add_tracepoint(const char *name)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tracepoint_entry *e;
	size_t name_len = strlen(name) + 1;
	u32 hash = jhash(name, name_len-1, 0);

	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name)) {
			printk(KERN_NOTICE
				"tracepoint %s busy\n", name);
			return ERR_PTR(-EEXIST);	/* Already there */
		}
	}
	/*
	 * Using kmalloc here to allocate a variable length element. Could
	 * cause some memory fragmentation if overused.
	 */
	e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
	if (!e)
		return ERR_PTR(-ENOMEM);
	memcpy(&e->name[0], name, name_len);
	e->funcs = NULL;
	e->refcount = 0;
	hlist_add_head(&e->hlist, head);
	return e;
}

/*
 * Remove the tracepoint from the tracepoint hash table. Must be called with
 * tracepoints_mutex held.
 */
static inline void remove_tracepoint(struct tracepoint_entry *e)
{
	hlist_del(&e->hlist);
	kfree(e);
}

/*
 * Sets the probe callback corresponding to one tracepoint.
 */
static void set_tracepoint(struct tracepoint_entry **entry,
	struct tracepoint *elem, int active)
{
	WARN_ON(strcmp((*entry)->name, elem->name) != 0);

	if (elem->regfunc && !static_key_enabled(&elem->key) && active)
		elem->regfunc();
	else if (elem->unregfunc && static_key_enabled(&elem->key) && !active)
		elem->unregfunc();

	/*
	 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
	 * probe callbacks array is consistent before setting a pointer to it.
	 * This array is referenced by __DO_TRACE from
	 * include/linux/tracepoint.h. A matching smp_read_barrier_depends()
	 * is used.
	 */
	rcu_assign_pointer(elem->funcs, (*entry)->funcs);
	if (active && !static_key_enabled(&elem->key))
		static_key_slow_inc(&elem->key);
	else if (!active && static_key_enabled(&elem->key))
		static_key_slow_dec(&elem->key);
}

/*
 * Disable a tracepoint and its probe callback.
 * Note: only waiting an RCU grace period after setting elem->funcs to NULL
 * ensures that the original callback is not used anymore. This is ensured
 * by the preempt_disable around the call site.
 */
static void disable_tracepoint(struct tracepoint *elem)
{
	if (elem->unregfunc && static_key_enabled(&elem->key))
		elem->unregfunc();

	if (static_key_enabled(&elem->key))
		static_key_slow_dec(&elem->key);
	rcu_assign_pointer(elem->funcs, NULL);
}

/**
 * tracepoint_update_probe_range - Update a probe range
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Updates the probe callback corresponding to a range of tracepoints.
 * Called with tracepoints_mutex held.
 */
static void tracepoint_update_probe_range(struct tracepoint * const *begin,
					  struct tracepoint * const *end)
{
	struct tracepoint * const *iter;
	struct tracepoint_entry *mark_entry;

	if (!begin)
		return;

	for (iter = begin; iter < end; iter++) {
		mark_entry = get_tracepoint((*iter)->name);
		if (mark_entry) {
			set_tracepoint(&mark_entry, *iter,
					!!mark_entry->refcount);
		} else {
			disable_tracepoint(*iter);
		}
	}
}

#ifdef CONFIG_MODULES
void module_update_tracepoints(void)
{
	struct tp_module *tp_mod;

	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		tracepoint_update_probe_range(tp_mod->tracepoints_ptrs,
			tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints);
}
#else /* CONFIG_MODULES */
void module_update_tracepoints(void)
{
}
#endif /* CONFIG_MODULES */

/*
 * Update probes, removing the faulty probes.
 * Called with tracepoints_mutex held.
 */
static void tracepoint_update_probes(void)
{
	/* Core kernel tracepoints */
	tracepoint_update_probe_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs);
	/* Tracepoints in modules. */
	module_update_tracepoints();
}

static struct tracepoint_func *
tracepoint_add_probe(const char *name, void *probe, void *data)
{
	struct tracepoint_entry *entry;
	struct tracepoint_func *old;

	entry = get_tracepoint(name);
	if (!entry) {
		entry = add_tracepoint(name);
		if (IS_ERR(entry))
			return (struct tracepoint_func *)entry;
	}
	old = tracepoint_entry_add_probe(entry, probe, data);
	if (IS_ERR(old) && !entry->refcount)
		remove_tracepoint(entry);
	return old;
}

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @name: tracepoint name
 * @probe: probe handler
 * @data: probe private data
 *
 * Returns 0 if ok, error value on error.
 * The probe address must at least be aligned on the architecture pointer size.
 */
int tracepoint_probe_register(const char *name, void *probe, void *data)
{
	struct tracepoint_func *old;

	mutex_lock(&tracepoints_mutex);
	old = tracepoint_add_probe(name, probe, data);
	if (IS_ERR(old)) {
		mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_update_probes();		/* may update entry */
	mutex_unlock(&tracepoints_mutex);
	release_probes(old);
	return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
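
/*
 * Illustrative usage sketch (the "irq_handler_entry" name and prototype
 * are assumptions based on include/trace/events/irq.h, not part of this
 * file):
 *
 *	static void probe_irq_entry(void *data, int irq,
 *				    struct irqaction *action)
 *	{
 *		...
 *	}
 *
 *	ret = tracepoint_probe_register("irq_handler_entry",
 *					probe_irq_entry, NULL);
 *
 * The probe is called with the registered data pointer as its first
 * argument, followed by the tracepoint's own arguments. Undo with
 * tracepoint_probe_unregister("irq_handler_entry", probe_irq_entry, NULL).
 */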

static struct tracepoint_func *
tracepoint_remove_probe(const char *name, void *probe, void *data)
{
	struct tracepoint_entry *entry;
	struct tracepoint_func *old;

	entry = get_tracepoint(name);
	if (!entry)
		return ERR_PTR(-ENOENT);
	old = tracepoint_entry_remove_probe(entry, probe, data);
	if (IS_ERR(old))
		return old;
	if (!entry->refcount)
		remove_tracepoint(entry);
	return old;
}

/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @name: tracepoint name
 * @probe: probe function pointer
 * @data: probe private data
 *
 * We do not need to call a synchronize_sched to make sure the probes have
 * finished running before doing a module unload, because the module unload
 * itself uses stop_machine(), which ensures that every preempt-disabled
 * section has finished.
 */
int tracepoint_probe_unregister(const char *name, void *probe, void *data)
{
	struct tracepoint_func *old;

	mutex_lock(&tracepoints_mutex);
	old = tracepoint_remove_probe(name, probe, data);
	if (IS_ERR(old)) {
		mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_update_probes();		/* may update entry */
	mutex_unlock(&tracepoints_mutex);
	release_probes(old);
	return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);

static LIST_HEAD(old_probes);
static int need_update;

static void tracepoint_add_old_probes(void *old)
{
	need_update = 1;
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);
		list_add(&tp_probes->u.list, &old_probes);
	}
}

/**
 * tracepoint_probe_register_noupdate - register a probe but do not connect it
 * @name: tracepoint name
 * @probe: probe handler
 * @data: probe private data
 *
 * The caller must call tracepoint_probe_update_all() afterward.
 */
int tracepoint_probe_register_noupdate(const char *name, void *probe,
				       void *data)
{
	struct tracepoint_func *old;

	mutex_lock(&tracepoints_mutex);
	old = tracepoint_add_probe(name, probe, data);
	if (IS_ERR(old)) {
		mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_add_old_probes(old);
	mutex_unlock(&tracepoints_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);

/**
 * tracepoint_probe_unregister_noupdate - remove a probe but do not disconnect it
 * @name: tracepoint name
 * @probe: probe function pointer
 * @data: probe private data
 *
 * The caller must call tracepoint_probe_update_all() afterward.
 */
int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
					 void *data)
{
	struct tracepoint_func *old;

	mutex_lock(&tracepoints_mutex);
	old = tracepoint_remove_probe(name, probe, data);
	if (IS_ERR(old)) {
		mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_add_old_probes(old);
	mutex_unlock(&tracepoints_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);

/**
 * tracepoint_probe_update_all - update tracepoints
 */
void tracepoint_probe_update_all(void)
{
	LIST_HEAD(release_probes);
	struct tp_probes *pos, *next;

	mutex_lock(&tracepoints_mutex);
	if (!need_update) {
		mutex_unlock(&tracepoints_mutex);
		return;
	}
	if (!list_empty(&old_probes))
		list_replace_init(&old_probes, &release_probes);
	need_update = 0;
	tracepoint_update_probes();
	mutex_unlock(&tracepoints_mutex);
	list_for_each_entry_safe(pos, next, &release_probes, u.list) {
		list_del(&pos->u.list);
		call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
	}
}
EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
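
/*
 * Illustrative batched-update sketch (probe_a/probe_b and the tracepoint
 * names are placeholders): queue several changes with the _noupdate
 * variants, then apply them all in a single pass, with one probe-array
 * update and one RCU release phase:
 *
 *	tracepoint_probe_register_noupdate("sched_wakeup", probe_a, NULL);
 *	tracepoint_probe_register_noupdate("sched_switch", probe_b, NULL);
 *	tracepoint_probe_update_all();
 */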

/**
 * tracepoint_get_iter_range - Get a next tracepoint iterator given a range.
 * @tracepoint: current tracepoint (in), next tracepoint (out)
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Returns whether a next tracepoint has been found (1) or not (0).
 * Will return the first tracepoint in the range if the input tracepoint is
 * NULL.
 */
static int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
	struct tracepoint * const *begin, struct tracepoint * const *end)
{
	if (!*tracepoint && begin != end) {
		*tracepoint = begin;
		return 1;
	}
	if (*tracepoint >= begin && *tracepoint < end)
		return 1;
	return 0;
}

#ifdef CONFIG_MODULES
static void tracepoint_get_iter(struct tracepoint_iter *iter)
{
	int found = 0;
	struct tp_module *iter_mod;

	/* Core kernel tracepoints */
	if (!iter->module) {
		found = tracepoint_get_iter_range(&iter->tracepoint,
				__start___tracepoints_ptrs,
				__stop___tracepoints_ptrs);
		if (found)
			goto end;
	}
	/* Tracepoints in modules */
	mutex_lock(&tracepoints_mutex);
	list_for_each_entry(iter_mod, &tracepoint_module_list, list) {
		/*
		 * Sorted module list
		 */
		if (iter_mod < iter->module)
			continue;
		else if (iter_mod > iter->module)
			iter->tracepoint = NULL;
		found = tracepoint_get_iter_range(&iter->tracepoint,
			iter_mod->tracepoints_ptrs,
			iter_mod->tracepoints_ptrs + iter_mod->num_tracepoints);
		if (found) {
			iter->module = iter_mod;
			break;
		}
	}
	mutex_unlock(&tracepoints_mutex);
end:
	if (!found)
		tracepoint_iter_reset(iter);
}
#else /* CONFIG_MODULES */
static void tracepoint_get_iter(struct tracepoint_iter *iter)
{
	int found = 0;

	/* Core kernel tracepoints */
	found = tracepoint_get_iter_range(&iter->tracepoint,
			__start___tracepoints_ptrs,
			__stop___tracepoints_ptrs);
	if (!found)
		tracepoint_iter_reset(iter);
}
#endif /* CONFIG_MODULES */

void tracepoint_iter_start(struct tracepoint_iter *iter)
{
	tracepoint_get_iter(iter);
}
EXPORT_SYMBOL_GPL(tracepoint_iter_start);

void tracepoint_iter_next(struct tracepoint_iter *iter)
{
	iter->tracepoint++;
	/*
	 * iter->tracepoint may be invalid because we blindly incremented it.
	 * Make sure it is valid by marshalling on the tracepoints, getting the
	 * tracepoints from following modules if necessary.
	 */
	tracepoint_get_iter(iter);
}
EXPORT_SYMBOL_GPL(tracepoint_iter_next);

void tracepoint_iter_stop(struct tracepoint_iter *iter)
{
}
EXPORT_SYMBOL_GPL(tracepoint_iter_stop);

void tracepoint_iter_reset(struct tracepoint_iter *iter)
{
#ifdef CONFIG_MODULES
	iter->module = NULL;
#endif /* CONFIG_MODULES */
	iter->tracepoint = NULL;
}
EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
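
/*
 * Illustrative iteration sketch over all known tracepoints; the iterator
 * ends with iter.tracepoint reset to NULL when the ranges are exhausted:
 *
 *	struct tracepoint_iter iter;
 *
 *	tracepoint_iter_reset(&iter);
 *	tracepoint_iter_start(&iter);
 *	while (iter.tracepoint) {
 *		printk("%s\n", (*iter.tracepoint)->name);
 *		tracepoint_iter_next(&iter);
 *	}
 *	tracepoint_iter_stop(&iter);
 */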

#ifdef CONFIG_MODULES
static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod, *iter;
	int ret = 0;

	/*
	 * We skip modules that taint the kernel, especially those with different
	 * module headers (for forced load), to make sure we don't cause a crash.
	 * Staging and out-of-tree GPL modules are fine.
	 */
	if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
		return 0;
	mutex_lock(&tracepoints_mutex);
	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
	if (!tp_mod) {
		ret = -ENOMEM;
		goto end;
	}
	tp_mod->num_tracepoints = mod->num_tracepoints;
	tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs;

	/*
	 * tracepoint_module_list is kept sorted by struct module pointer
	 * address for iteration on tracepoints from a seq_file that can release
	 * the mutex between calls.
	 */
	list_for_each_entry_reverse(iter, &tracepoint_module_list, list) {
		BUG_ON(iter == tp_mod);	/* Should never be in the list twice */
		if (iter < tp_mod) {
			/* We belong to the location right after iter. */
			list_add(&tp_mod->list, &iter->list);
			goto module_added;
		}
	}
	/* We belong to the beginning of the list */
	list_add(&tp_mod->list, &tracepoint_module_list);
module_added:
	tracepoint_update_probe_range(mod->tracepoints_ptrs,
		mod->tracepoints_ptrs + mod->num_tracepoints);
end:
	mutex_unlock(&tracepoints_mutex);
	return ret;
}

static int tracepoint_module_going(struct module *mod)
{
	struct tp_module *pos;

	mutex_lock(&tracepoints_mutex);
	tracepoint_update_probe_range(mod->tracepoints_ptrs,
		mod->tracepoints_ptrs + mod->num_tracepoints);
	list_for_each_entry(pos, &tracepoint_module_list, list) {
		if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) {
			list_del(&pos->list);
			kfree(pos);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoints_mutex);
	return 0;
}

int tracepoint_module_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = tracepoint_module_coming(mod);
		break;
	case MODULE_STATE_LIVE:
		break;
	case MODULE_STATE_GOING:
		ret = tracepoint_module_going(mod);
		break;
	}
	return ret;
}

struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};

static int init_tracepoints(void)
{
	return register_module_notifier(&tracepoint_module_nb);
}
__initcall(init_tracepoints);
#endif /* CONFIG_MODULES */

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;

void syscall_regfunc(void)
{
	unsigned long flags;
	struct task_struct *g, *t;

	if (!sys_tracepoint_refcount) {
		read_lock_irqsave(&tasklist_lock, flags);
		do_each_thread(g, t) {
			/* Skip kernel threads. */
			if (t->mm)
				set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
		} while_each_thread(g, t);
		read_unlock_irqrestore(&tasklist_lock, flags);
	}
	sys_tracepoint_refcount++;
}

void syscall_unregfunc(void)
{
	unsigned long flags;
	struct task_struct *g, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock_irqsave(&tasklist_lock, flags);
		do_each_thread(g, t) {
			clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
		} while_each_thread(g, t);
		read_unlock_irqrestore(&tasklist_lock, flags);
	}
}
#endif