ftrace_wake_up_task(void *rq, struct task_struct *wakee,
struct task_struct *curr);
extern void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data);
+extern void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
#else
static inline void
ftrace_ctx_switch(void *rq, struct task_struct *prev, struct task_struct *next)
{
}
static inline void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
{
}
+static inline void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+}
#endif
extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
if (!(this_sd->flags & SD_WAKE_AFFINE))
return 0;
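+	/* record the avg_overlap values used by the sync-wakeup check below */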
+	ftrace_special(__LINE__, curr->se.avg_overlap, sync);
+	ftrace_special(__LINE__, p->se.avg_overlap, -1);
/*
* If the currently running task will sleep within
* a reasonable amount of time then attract this newly
if (unlikely(se == pse))
return;
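+	/* record the wakee's pid and the current task's last_wakeup time */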
+	ftrace_special(__LINE__, p->pid, se->last_wakeup);
cfs_rq_of(pse)->next = pse;
/*
comm);
break;
case TRACE_SPECIAL:
- trace_seq_printf(s, " %ld %ld %ld\n",
+ trace_seq_printf(s, "# %ld %ld %ld\n",
entry->special.arg1,
entry->special.arg2,
entry->special.arg3);
return 0;
break;
case TRACE_SPECIAL:
- ret = trace_seq_printf(s, " %ld %ld %ld\n",
+ ret = trace_seq_printf(s, "# %ld %ld %ld\n",
entry->special.arg1,
entry->special.arg2,
entry->special.arg3);
break;
case TRACE_SPECIAL:
case TRACE_STACK:
- ret = trace_seq_printf(s, " %ld %ld %ld\n",
+ ret = trace_seq_printf(s, "# %ld %ld %ld\n",
entry->special.arg1,
entry->special.arg2,
entry->special.arg3);
wakeup_sched_wakeup(wakee, curr);
}
+void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+	struct trace_array *tr = ctx_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+
+	if (!tracer_enabled)
+		return;
+
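+	/* grab this CPU's trace buffer with interrupts disabled */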
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
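+	/* only record when this is the outermost trace call on this CPU */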
+	if (likely(disabled == 1))
+		__trace_special(tr, data, arg1, arg2, arg3);
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
static void sched_switch_reset(struct trace_array *tr)
{
int cpu;