return !trace_seq_has_overflowed(s);
}
-int
-seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
- unsigned long sym_flags)
-{
- struct mm_struct *mm = NULL;
- unsigned int i;
-
- if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
- struct task_struct *task;
- /*
- * we do the lookup on the thread group leader,
- * since individual threads might have already quit!
- */
- rcu_read_lock();
- task = find_task_by_vpid(entry->tgid);
- if (task)
- mm = get_task_mm(task);
- rcu_read_unlock();
- }
-
- for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
- unsigned long ip = entry->caller[i];
-
- if (ip == ULONG_MAX || trace_seq_has_overflowed(s))
- break;
-
- trace_seq_puts(s, " => ");
-
- if (!ip) {
- trace_seq_puts(s, "??");
- trace_seq_putc(s, '\n');
- continue;
- }
-
- seq_print_user_ip(s, mm, ip, sym_flags);
- trace_seq_putc(s, '\n');
- }
-
- if (mm)
- mmput(mm);
-
- return !trace_seq_has_overflowed(s);
-}
-
int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
@@ ... @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
{
struct userstack_entry *field;
struct trace_seq *s = &iter->seq;
+ struct mm_struct *mm = NULL;
+ unsigned int i;

trace_assign_type(field, iter->ent);

trace_seq_puts(s, "<user stack trace>\n");
- seq_print_userip_objs(field, s, flags);
+
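+ /*
+  * With TRACE_ITER_SYM_USEROBJ set, take a reference on the task's
+  * mm so each address below can be resolved to the object file it
+  * is mapped from.
+  */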
+ if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
+ struct task_struct *task;
+ /*
+ * we do the lookup on the thread group leader,
+ * since individual threads might have already quit!
+ */
+ rcu_read_lock();
+ task = find_task_by_vpid(field->tgid);
+ if (task)
+ mm = get_task_mm(task);
+ rcu_read_unlock();
+ }
+
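+ /*
+  * Walk the saved entries: ULONG_MAX terminates the recorded
+  * stack, and a zero entry is printed as "??".
+  */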
+ for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+ unsigned long ip = field->caller[i];
+
+ if (ip == ULONG_MAX || trace_seq_has_overflowed(s))
+ break;
+
+ trace_seq_puts(s, " => ");
+
+ if (!ip) {
+ trace_seq_puts(s, "??");
+ trace_seq_putc(s, '\n');
+ continue;
+ }
+
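+ /* Print the address, and the backing object file when mm is set. */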
+ seq_print_user_ip(s, mm, ip, flags);
+ trace_seq_putc(s, '\n');
+ }
+
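+ /* Drop the reference taken by get_task_mm(). */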
+ if (mm)
+ mmput(mm);
return trace_handle_return(s);
}