(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
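+
+/*
+ * Reserve space for an event on @tr's ring buffer and pre-fill the
+ * generic entry header: tracing_generic_entry_update() stamps the
+ * current pid and the irq/preempt state taken from @flags and @pc,
+ * and the entry type is set to @type.  Returns NULL when the ring
+ * buffer is full or disabled, so callers must check the result
+ * before filling in their type-specific fields.
+ */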
+struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
+						    unsigned char type,
+						    unsigned long len,
+						    unsigned long flags, int pc)
+{
+	struct ring_buffer_event *event;
+
+	event = ring_buffer_lock_reserve(tr->buffer, len);
+	if (event != NULL) {
+		struct trace_entry *ent = ring_buffer_event_data(event);
+
+		tracing_generic_entry_update(ent, flags, pc);
+		ent->type = type;
+	}
+
+	return event;
+}
+
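+/* Forward declarations: used by trace_buffer_unlock_commit() below. */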
+static void ftrace_trace_stack(struct trace_array *tr,
+			       unsigned long flags, int skip, int pc);
+static void ftrace_trace_userstack(struct trace_array *tr,
+				   unsigned long flags, int pc);
+
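+/*
+ * Commit a previously reserved event and run the common follow-up
+ * work: record kernel and user stack traces when the corresponding
+ * trace options are enabled (the skip count drops the tracing
+ * internals' own frames), then wake up readers waiting for trace
+ * data.
+ */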
+void trace_buffer_unlock_commit(struct trace_array *tr,
+				struct ring_buffer_event *event,
+				unsigned long flags, int pc)
+{
+	ring_buffer_unlock_commit(tr->buffer, event);
+
+	ftrace_trace_stack(tr, flags, 6, pc);
+	ftrace_trace_userstack(tr, flags, pc);
+	trace_wake_up();
+}
+
void
trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip, unsigned long flags,
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
return;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+ event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
+ flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_FN;
entry->ip = ip;
entry->parent_ip = parent_ip;
ring_buffer_unlock_commit(tr->buffer, event);
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
return;
- event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
+ event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_GRAPH_ENT;
entry->graph_ent = *trace;
ring_buffer_unlock_commit(global_trace.buffer, event);
}
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
return;
- event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
+ event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_GRAPH_RET;
entry->ret = *trace;
ring_buffer_unlock_commit(global_trace.buffer, event);
}
struct stack_entry *entry;
struct stack_trace trace;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+ event = trace_buffer_lock_reserve(tr, TRACE_STACK,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_STACK;
-
memset(&entry->caller, 0, sizeof(entry->caller));
trace.nr_entries = 0;
if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+ event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_USER_STACK;
memset(&entry->caller, 0, sizeof(entry->caller));
struct trace_array *tr = __tr;
struct special_entry *entry;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+ event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
+ sizeof(*entry), 0, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, pc);
- entry->ent.type = TRACE_SPECIAL;
entry->arg1 = arg1;
entry->arg2 = arg2;
entry->arg3 = arg3;
- ring_buffer_unlock_commit(tr->buffer, event);
- ftrace_trace_stack(tr, 0, 4, pc);
- ftrace_trace_userstack(tr, 0, pc);
-
- trace_wake_up();
+ trace_buffer_unlock_commit(tr, event, 0, pc);
}
void
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+ event = trace_buffer_lock_reserve(tr, TRACE_CTX,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_CTX;
entry->prev_pid = prev->pid;
entry->prev_prio = prev->prio;
entry->prev_state = prev->state;
entry->next_prio = next->prio;
entry->next_state = next->state;
entry->next_cpu = task_cpu(next);
- ring_buffer_unlock_commit(tr->buffer, event);
- ftrace_trace_stack(tr, flags, 5, pc);
- ftrace_trace_userstack(tr, flags, pc);
+ trace_buffer_unlock_commit(tr, event, flags, pc);
}
void
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+ event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_WAKE;
entry->prev_pid = curr->pid;
entry->prev_prio = curr->prio;
entry->prev_state = curr->state;
entry->next_prio = wakee->prio;
entry->next_state = wakee->state;
entry->next_cpu = task_cpu(wakee);
- ring_buffer_unlock_commit(tr->buffer, event);
- ftrace_trace_stack(tr, flags, 6, pc);
- ftrace_trace_userstack(tr, flags, pc);
-
- trace_wake_up();
+ trace_buffer_unlock_commit(tr, event, flags, pc);
}
void
trace_buf[len] = 0;
size = sizeof(*entry) + len + 1;
- event = ring_buffer_lock_reserve(tr->buffer, size);
+ event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
if (!event)
goto out_unlock;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, irq_flags, pc);
- entry->ent.type = TRACE_PRINT;
entry->ip = ip;
entry->depth = depth;
{
struct ring_buffer_event *event;
struct trace_mmiotrace_rw *entry;
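+ /* capture the preempt count once for the entry header and the commit path */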
+ int pc = preempt_count();
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+ event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW,
+ sizeof(*entry), 0, pc);
if (!event) {
atomic_inc(&dropped_count);
return;
}
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, preempt_count());
- entry->ent.type = TRACE_MMIO_RW;
entry->rw = *rw;
- ring_buffer_unlock_commit(tr->buffer, event);
-
- trace_wake_up();
+ trace_buffer_unlock_commit(tr, event, 0, pc);
}
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
struct ring_buffer_event *event;
struct trace_mmiotrace_map *entry;
+ int pc = preempt_count();
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+ event = trace_buffer_lock_reserve(tr, TRACE_MMIO_MAP,
+ sizeof(*entry), 0, pc);
if (!event) {
atomic_inc(&dropped_count);
return;
}
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, preempt_count());
- entry->ent.type = TRACE_MMIO_MAP;
entry->map = *map;
- ring_buffer_unlock_commit(tr->buffer, event);
-
- trace_wake_up();
+ trace_buffer_unlock_commit(tr, event, 0, pc);
}
void mmio_trace_mapping(struct mmiotrace_map *map)