tracing: Remove duplicate id information in event structure
Author:     Steven Rostedt <srostedt@redhat.com>
AuthorDate: Fri, 23 Apr 2010 14:38:03 +0000 (10:38 -0400)
Commit:     Steven Rostedt <rostedt@goodmis.org>
CommitDate: Fri, 14 May 2010 18:33:15 +0000 (14:33 -0400)
Now that the trace_event structure is embedded in the ftrace_event_call
structure, there is no need for the ftrace_event_call id field.
The id field is the same as the trace_event type field.
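
In sketch form (struct layouts abridged to just the members relevant here;
see the hunks below for the real definitions), users that used to compare
against call->id now read the type that register_ftrace_event() assigned to
the embedded event:

	struct trace_event {
		int	type;		/* set by register_ftrace_event() */
		/* ... */
	};

	struct ftrace_event_call {
		char			*name;
		struct trace_event	event;	/* event.type carries what ->id used to */
		/* ... */
	};

	/* e.g. in the output callbacks: */
	if (entry->type != call->event.type)
		return TRACE_TYPE_UNHANDLED;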

Removing the id and re-arranging the structure brings down the tracepoint
footprint by another 5K.

   text    data     bss     dec     hex filename
4913961 1088356  861512 6863829  68bbd5 vmlinux.orig
4895024 1023812  861512 6780348  6775bc vmlinux.print
4894944 1018052  861512 6774508  675eec vmlinux.id
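
That is, vmlinux.print -> vmlinux.id: 6780348 - 6774508 = 5840 bytes.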

Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
include/linux/ftrace_event.h
include/trace/ftrace.h
kernel/trace/trace_event_perf.c
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
kernel/trace/trace_export.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_syscalls.c

diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index b1a007d6e8fd7c6ae97ab1474fabffae57ecc491..0be028527633386469e70102dc60345efcbbb595 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -149,14 +149,13 @@ struct ftrace_event_call {
        char                    *name;
        struct dentry           *dir;
        struct trace_event      event;
-       int                     enabled;
-       int                     id;
        const char              *print_fmt;
-       int                     filter_active;
        struct event_filter     *filter;
        void                    *mod;
        void                    *data;
 
+       int                     enabled;
+       int                     filter_active;
        int                     perf_refcount;
 };
 
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 824141d5cf04b4469f50fe07b1b4ee83f984706e..4866c109fa9a58230cec916d22acfc313eda458f 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
  *
  *     entry = iter->ent;
  *
- *     if (entry->type != event_<call>.id) {
+ *     if (entry->type != event_<call>->event.type) {
  *             WARN_ON_ONCE(1);
  *             return TRACE_TYPE_UNHANDLED;
  *     }
@@ -221,7 +221,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags,    \
                                                                        \
        entry = iter->ent;                                              \
                                                                        \
-       if (entry->type != event->id) {                                 \
+       if (entry->type != event->event.type) {                         \
                WARN_ON_ONCE(1);                                        \
                return TRACE_TYPE_UNHANDLED;                            \
        }                                                               \
@@ -257,7 +257,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags,    \
                                                                        \
        entry = iter->ent;                                              \
                                                                        \
-       if (entry->type != event_##call.id) {                           \
+       if (entry->type != event_##call.event.type) {                   \
                WARN_ON_ONCE(1);                                        \
                return TRACE_TYPE_UNHANDLED;                            \
        }                                                               \
@@ -409,7 +409,7 @@ static inline notrace int ftrace_get_offsets_##call(                        \
  *     __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
  *
  *     event = trace_current_buffer_lock_reserve(&buffer,
- *                               event_<call>.id,
+ *                               event_<call>->event.type,
  *                               sizeof(*entry) + __data_size,
  *                               irq_flags, pc);
  *     if (!event)
@@ -510,7 +510,7 @@ ftrace_raw_event_##call(void *__data, proto)                                \
        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
                                                                        \
        event = trace_current_buffer_lock_reserve(&buffer,              \
-                                event_call->id,                        \
+                                event_call->event.type,                \
                                 sizeof(*entry) + __data_size,          \
                                 irq_flags, pc);                        \
        if (!event)                                                     \
@@ -711,7 +711,7 @@ perf_trace_##call(void *__data, proto)                                      \
                      "profile buffer not large enough"))               \
                return;                                                 \
        entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(     \
-               __entry_size, event_call->id, &rctx, &irq_flags);       \
+               __entry_size, event_call->event.type, &rctx, &irq_flags); \
        if (!entry)                                                     \
                return;                                                 \
        tstruct                                                         \
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 196fe9d267739485918164d626e155e5da4cd5af..0a47e8d6b4914e555ece3abec80a45db64f3c3d3 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -80,7 +80,7 @@ int perf_trace_enable(int event_id)
 
        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
-               if (event->id == event_id &&
+               if (event->event.type == event_id &&
                    event->class && event->class->perf_probe &&
                    try_module_get(event->mod)) {
                        ret = perf_trace_event_enable(event);
@@ -128,7 +128,7 @@ void perf_trace_disable(int event_id)
 
        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
-               if (event->id == event_id) {
+               if (event->event.type == event_id) {
                        perf_trace_event_disable(event);
                        module_put(event->mod);
                        break;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index aafe5bff8f5979ec264fe8c58d4038ed1bc822fc..8daaca5475b5568ced9da89acee910ecafecea11 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -125,7 +125,6 @@ int trace_event_raw_init(struct ftrace_event_call *call)
        id = register_ftrace_event(&call->event);
        if (!id)
                return -ENODEV;
-       call->id = id;
 
        return 0;
 }
@@ -567,7 +566,7 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
        trace_seq_init(s);
 
        trace_seq_printf(s, "name: %s\n", call->name);
-       trace_seq_printf(s, "ID: %d\n", call->id);
+       trace_seq_printf(s, "ID: %d\n", call->event.type);
        trace_seq_printf(s, "format:\n");
 
        head = trace_get_fields(call);
@@ -641,7 +640,7 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
                return -ENOMEM;
 
        trace_seq_init(s);
-       trace_seq_printf(s, "%d\n", call->id);
+       trace_seq_printf(s, "%d\n", call->event.type);
 
        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
@@ -969,7 +968,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
                                  enable);
 
 #ifdef CONFIG_PERF_EVENTS
-       if (call->id && (call->class->perf_probe || call->class->reg))
+       if (call->event.type && (call->class->perf_probe || call->class->reg))
                trace_create_file("id", 0444, call->dir, call,
                                  id);
 #endif
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 961f99b74bdd0a63b1b723cd9ebc05821725ebd0..2702d6bbf1ab4185a8ebd2d4e8c25d97d73f21d4 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1395,7 +1395,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
        mutex_lock(&event_mutex);
 
        list_for_each_entry(call, &ftrace_events, list) {
-               if (call->id == event_id)
+               if (call->event.type == event_id)
                        break;
        }
 
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index e878d06c0ac00df2e1577c45b666d55bb6b224bc..8536e2a659690f5aa82935ec20eb7d8fa74e3b5f 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -153,7 +153,7 @@ static int ftrace_raw_init_event(struct ftrace_event_call *call)
 #define F_printk(fmt, args...) #fmt ", "  __stringify(args)
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(call, struct_name, type, tstruct, print)          \
+#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print)         \
                                                                        \
 struct ftrace_event_class event_class_ftrace_##call = {                        \
        .system                 = __stringify(TRACE_SYSTEM),            \
@@ -165,7 +165,7 @@ struct ftrace_event_call __used                                             \
 __attribute__((__aligned__(4)))                                                \
 __attribute__((section("_ftrace_events"))) event_##call = {            \
        .name                   = #call,                                \
-       .id                     = type,                                 \
+       .event.type             = etype,                                \
        .class                  = &event_class_ftrace_##call,           \
        .print_fmt              = print,                                \
 };                                                                     \
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d8061c3e02c9427b8dfd961eba050f710fed6552..934078bca3f96a7c355c00ee21871ebaaf702580 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -960,8 +960,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 
        size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
 
-       event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
-                                                 irq_flags, pc);
+       event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
+                                                 size, irq_flags, pc);
        if (!event)
                return;
 
@@ -992,8 +992,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 
        size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
 
-       event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
-                                                 irq_flags, pc);
+       event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
+                                                 size, irq_flags, pc);
        if (!event)
                return;
 
@@ -1228,7 +1228,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
                     "profile buffer not large enough"))
                return;
 
-       entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
+       entry = perf_trace_buf_prepare(size, call->event.type,
+                                      &rctx, &irq_flags);
        if (!entry)
                return;
 
@@ -1258,7 +1259,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
                     "profile buffer not large enough"))
                return;
 
-       entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
+       entry = perf_trace_buf_prepare(size, call->event.type,
+                                      &rctx, &irq_flags);
        if (!entry)
                return;
 
@@ -1375,8 +1377,8 @@ static int register_probe_event(struct trace_probe *tp)
        }
        if (set_print_fmt(tp) < 0)
                return -ENOMEM;
-       call->id = register_ftrace_event(&call->event);
-       if (!call->id) {
+       ret = register_ftrace_event(&call->event);
+       if (!ret) {
                kfree(call->print_fmt);
                return -ENODEV;
        }
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 7c7cfe95a8538f0cbf049baf21e514264c61ff6e..9d358301ae3eeea4503fc794a9127da87c94f3ad 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -117,7 +117,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags,
        if (!entry)
                goto end;
 
-       if (entry->enter_event->id != ent->type) {
+       if (entry->enter_event->event.type != ent->type) {
                WARN_ON_ONCE(1);
                goto end;
        }
@@ -173,7 +173,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags,
                return TRACE_TYPE_HANDLED;
        }
 
-       if (entry->exit_event->id != ent->type) {
+       if (entry->exit_event->event.type != ent->type) {
                WARN_ON_ONCE(1);
                return TRACE_TYPE_UNHANDLED;
        }
@@ -315,7 +315,7 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
        size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
 
        event = trace_current_buffer_lock_reserve(&buffer,
-                       sys_data->enter_event->id, size, 0, 0);
+                       sys_data->enter_event->event.type, size, 0, 0);
        if (!event)
                return;
 
@@ -347,7 +347,7 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
                return;
 
        event = trace_current_buffer_lock_reserve(&buffer,
-                       sys_data->exit_event->id, sizeof(*entry), 0, 0);
+                       sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
        if (!event)
                return;
 
@@ -511,7 +511,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
                return;
 
        rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
-                               sys_data->enter_event->id, &rctx, &flags);
+                               sys_data->enter_event->event.type,
+                               &rctx, &flags);
        if (!rec)
                return;
 
@@ -586,7 +587,8 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
                return;
 
        rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
-                               sys_data->exit_event->id, &rctx, &flags);
+                               sys_data->exit_event->event.type,
+                               &rctx, &flags);
        if (!rec)
                return;