/* Long is fine, even if it is only 4 bytes ;-) */
static unsigned long *ftrace_nop;
-struct ftrace_record {
- struct dyn_ftrace rec;
- int failed;
-} __attribute__((packed));
-
-struct ftrace_page {
- struct ftrace_page *next;
- int index;
- struct ftrace_record records[];
-} __attribute__((packed));
-
-#define ENTRIES_PER_PAGE \
- ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct ftrace_record))
-
-/* estimate from running different kernels */
-#define NR_TO_INIT 10000
-
-#define MCOUNT_ADDR ((long)(&mcount))
-
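/*
 * A patched call site is five bytes: either the arch nop or an
 * e8 (call rel32) instruction. Overlaying the raw bytes with the
 * opcode/offset pair lets us assemble the call in place. For example,
 * a call whose next instruction sits at 0xc0100005 targeting
 * 0xc01a0000 encodes as e8 fb ff 09 00 (rel32 = 0x0009fffb).
 */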
union ftrace_code_union {
char code[5];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};
-static struct ftrace_page *ftrace_pages_start;
-static struct ftrace_page *ftrace_pages;
-
-notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
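+/*
+ * Nonzero if the site behind ip already holds the nop pattern,
+ * i.e. it was converted by an earlier pass.
+ */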
+notrace int ftrace_ip_converted(unsigned long ip)
{
- struct ftrace_record *rec;
unsigned long save;
ip -= CALL_BACK;
save = *(long *)ip;
- /* If this was already converted, skip it */
- if (save == *ftrace_nop)
- return NULL;
+ return save == *ftrace_nop;
+}
- if (ftrace_pages->index == ENTRIES_PER_PAGE) {
- if (!ftrace_pages->next)
- return NULL;
- ftrace_pages = ftrace_pages->next;
- }
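+/*
+ * The e8 displacement is relative to the instruction that follows the
+ * call; the recorded ip already points there, so addr - ip is exactly
+ * the value to encode.
+ */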
+static int notrace ftrace_calc_offset(long ip, long addr)
+{
+ return (int)(addr - ip);
+}
- rec = &ftrace_pages->records[ftrace_pages->index++];
+notrace unsigned char *ftrace_nop_replace(void)
+{
+	return (unsigned char *)ftrace_nop;
+}
+
+notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+ static union ftrace_code_union calc;
- return &rec->rec;
+ calc.e8 = 0xe8;
+ calc.offset = ftrace_calc_offset(ip, addr);
+
+ /*
+ * No locking needed, this must be called via kstop_machine
+ * which in essence is like running on a uniprocessor machine.
+ */
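+	/*
+	 * The returned pointer aliases this static buffer, so the bytes
+	 * must be consumed before the next call to ftrace_call_replace().
+	 */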
+ return calc.code;
}
-static int notrace
+notrace int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char *new_code)
{
unsigned char newch = new_code[4];
int faulted = 0;
+ /* move the IP back to the start of the call */
+ ip -= CALL_BACK;
+
/*
	 * Note: Due to modules and __init, code can
	 * disappear and change; we need to protect against faulting
return faulted;
}
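/*
 * Typical use, as in the replace/disable paths below: build both byte
 * patterns, then swap them while everything runs under stop_machine,
 * e.g.
 *
 *	new = ftrace_nop_replace();
 *	old = ftrace_call_replace(rec->ip, MCOUNT_ADDR);
 *	failed = ftrace_modify_code(rec->ip, old, new);
 *
 * A nonzero return means the site could not be safely patched (the
 * text faulted or had unexpected contents), and the record should be
 * marked FTRACE_FL_FAILED.
 */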
-static int notrace ftrace_calc_offset(long ip)
-{
- return (int)(MCOUNT_ADDR - ip);
-}
-
-notrace void ftrace_code_disable(struct dyn_ftrace *rec)
-{
- unsigned long ip;
- union ftrace_code_union save;
- struct ftrace_record *r =
- container_of(rec, struct ftrace_record, rec);
-
- ip = rec->ip;
-
- save.e8 = 0xe8;
- save.offset = ftrace_calc_offset(ip);
-
- /* move the IP back to the start of the call */
- ip -= CALL_BACK;
-
- r->failed = ftrace_modify_code(ip, save.code, (char *)ftrace_nop);
-}
-
-static void notrace ftrace_replace_code(int saved)
-{
- unsigned char *new = NULL, *old = NULL;
- struct ftrace_record *rec;
- struct ftrace_page *pg;
- unsigned long ip;
- int i;
-
- if (saved)
- old = (char *)ftrace_nop;
- else
- new = (char *)ftrace_nop;
-
- for (pg = ftrace_pages_start; pg; pg = pg->next) {
- for (i = 0; i < pg->index; i++) {
- union ftrace_code_union calc;
- rec = &pg->records[i];
-
- /* don't modify code that has already faulted */
- if (rec->failed)
- continue;
-
- ip = rec->rec.ip;
-
- calc.e8 = 0xe8;
- calc.offset = ftrace_calc_offset(ip);
-
- if (saved)
- new = calc.code;
- else
- old = calc.code;
-
- ip -= CALL_BACK;
-
- rec->failed = ftrace_modify_code(ip, old, new);
- }
- }
-
-}
-
-notrace void ftrace_startup_code(void)
-{
- ftrace_replace_code(1);
-}
-
-notrace void ftrace_shutdown_code(void)
-{
- ftrace_replace_code(0);
-}
-
-notrace void ftrace_shutdown_replenish(void)
-{
- if (ftrace_pages->next)
- return;
-
- /* allocate another page */
- ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
-}
-
-notrace int __init ftrace_shutdown_arch_init(void)
+int __init ftrace_dyn_arch_init(void)
{
const unsigned char *const *noptable = find_nop_table();
- struct ftrace_page *pg;
- int cnt;
- int i;
ftrace_nop = (unsigned long *)noptable[CALL_BACK];
- /* allocate a few pages */
- ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
- if (!ftrace_pages_start)
- return -1;
-
- /*
- * Allocate a few more pages.
- *
- * TODO: have some parser search vmlinux before
- * final linking to find all calls to ftrace.
- * Then we can:
- * a) know how many pages to allocate.
- * and/or
- * b) set up the table then.
- *
- * The dynamic code is still necessary for
- * modules.
- */
-
- pg = ftrace_pages = ftrace_pages_start;
-
- cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
-
- for (i = 0; i < cnt; i++) {
- pg->next = (void *)get_zeroed_page(GFP_KERNEL);
-
- /* If we fail, we'll try later anyway */
- if (!pg->next)
- break;
-
- pg = pg->next;
- }
-
return 0;
}
+
# define FTRACE_HASHBITS 10
# define FTRACE_HASHSIZE (1<<FTRACE_HASHBITS)
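+/*
+ * Per-record state bits: FTRACE_FL_FAILED marks a record whose text
+ * could not be patched, so later passes skip it.
+ */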
+enum {
+ FTRACE_FL_FAILED = (1<<0),
+};
+
struct dyn_ftrace {
struct hlist_node node;
unsigned long ip;
+ unsigned long flags;
};
/* defined in arch */
-extern struct dyn_ftrace *
-ftrace_alloc_shutdown_node(unsigned long ip);
-extern int ftrace_shutdown_arch_init(void);
-extern void ftrace_code_disable(struct dyn_ftrace *rec);
-extern void ftrace_startup_code(void);
-extern void ftrace_shutdown_code(void);
-extern void ftrace_shutdown_replenish(void);
+extern int ftrace_ip_converted(unsigned long ip);
+extern unsigned char *ftrace_nop_replace(void);
+extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
+extern int ftrace_dyn_arch_init(void);
+extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+ unsigned char *new_code);
#endif
#ifdef CONFIG_FRAME_POINTER
static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
+struct ftrace_page {
+ struct ftrace_page *next;
+ int index;
+ struct dyn_ftrace records[];
+} __attribute__((packed));
+
+#define ENTRIES_PER_PAGE \
+ ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
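+/*
+ * Roughly, on a 64-bit build with 4k pages: the header is a pointer
+ * plus an int and each dyn_ftrace is four words, so about
+ * (4096 - 12) / 32 = 127 records fit in a page.
+ */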
+
+/* estimate from running different kernels */
+#define NR_TO_INIT 10000
+
+static struct ftrace_page *ftrace_pages_start;
+static struct ftrace_page *ftrace_pages;
+
static int ftraced_trigger;
static int ftraced_suspend;
hlist_add_head(&node->node, &ftrace_hash[key]);
}
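+/*
+ * Hand out the next free dyn_ftrace record. Returns NULL both when
+ * the site is already converted and when no page space is left (a
+ * later replenish may add more).
+ */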
+static notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
+{
+ /* If this was already converted, skip it */
+ if (ftrace_ip_converted(ip))
+ return NULL;
+
+ if (ftrace_pages->index == ENTRIES_PER_PAGE) {
+ if (!ftrace_pages->next)
+ return NULL;
+ ftrace_pages = ftrace_pages->next;
+ }
+
+ return &ftrace_pages->records[ftrace_pages->index++];
+}
+
static void notrace
ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
{
.func = ftrace_record_ip,
};
+#define MCOUNT_ADDR ((long)(&mcount))
+
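+/*
+ * Walk every recorded site in one pass: saved != 0 installs the call
+ * to mcount over the nop, saved == 0 restores the nop. The nop side of
+ * the old/new pair is constant; the call bytes are rebuilt per record
+ * because the displacement depends on each ip.
+ */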
+static void notrace ftrace_replace_code(int saved)
+{
+ unsigned char *new = NULL, *old = NULL;
+ struct dyn_ftrace *rec;
+ struct ftrace_page *pg;
+ unsigned long ip;
+ int failed;
+ int i;
+
+ if (saved)
+ old = ftrace_nop_replace();
+ else
+ new = ftrace_nop_replace();
+
+ for (pg = ftrace_pages_start; pg; pg = pg->next) {
+ for (i = 0; i < pg->index; i++) {
+ rec = &pg->records[i];
+
+ /* don't modify code that has already faulted */
+ if (rec->flags & FTRACE_FL_FAILED)
+ continue;
+
+ ip = rec->ip;
+
+ if (saved)
+ new = ftrace_call_replace(ip, MCOUNT_ADDR);
+ else
+ old = ftrace_call_replace(ip, MCOUNT_ADDR);
+
+ failed = ftrace_modify_code(ip, old, new);
+ if (failed)
+ rec->flags |= FTRACE_FL_FAILED;
+ }
+ }
+}
+
+static notrace void ftrace_startup_code(void)
+{
+ ftrace_replace_code(1);
+}
+
+static notrace void ftrace_shutdown_code(void)
+{
+ ftrace_replace_code(0);
+}
+
+static notrace void ftrace_shutdown_replenish(void)
+{
+ if (ftrace_pages->next)
+ return;
+
+ /* allocate another page */
+ ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
+}
static int notrace __ftrace_modify_code(void *data)
{
	/* data is the patch pass to run under stop_machine */
	void (*func)(void) = data;

	func();
	return 0;
}
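+/*
+ * Disable a single call site: rewrite the call to addr (normally
+ * MCOUNT_ADDR) back to the arch nop, flagging the record on failure.
+ */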
+static notrace void
+ftrace_code_disable(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long ip;
+ unsigned char *nop, *call;
+ int failed;
+
+ ip = rec->ip;
+
+ nop = ftrace_nop_replace();
+ call = ftrace_call_replace(ip, addr);
+
+ failed = ftrace_modify_code(ip, call, nop);
+ if (failed)
+ rec->flags |= FTRACE_FL_FAILED;
+}
+
static void notrace ftrace_run_startup_code(void)
{
stop_machine_run(__ftrace_modify_code, ftrace_startup_code, NR_CPUS);
	/* all CPUs are stopped; we are safe to modify code */
hlist_for_each_entry(p, t, &head, node) {
- ftrace_code_disable(p);
+ ftrace_code_disable(p, MCOUNT_ADDR);
ftrace_update_cnt++;
}
return 0;
}
+static int __init ftrace_dyn_table_alloc(void)
+{
+ struct ftrace_page *pg;
+ int cnt;
+ int i;
+ int ret;
+
+ ret = ftrace_dyn_arch_init();
+ if (ret)
+ return ret;
+
+ /* allocate a few pages */
+ ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!ftrace_pages_start)
+		return -ENOMEM;
+
+ /*
+ * Allocate a few more pages.
+ *
+ * TODO: have some parser search vmlinux before
+ * final linking to find all calls to ftrace.
+ * Then we can:
+ * a) know how many pages to allocate.
+ * and/or
+ * b) set up the table then.
+ *
+ * The dynamic code is still necessary for
+ * modules.
+ */
+
+ pg = ftrace_pages = ftrace_pages_start;
+
+ cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
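+	/* e.g. roughly 10000 / 127 = 78 extra pages on a 64-bit build */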
+
+ for (i = 0; i < cnt; i++) {
+ pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+
+ /* If we fail, we'll try later anyway */
+ if (!pg->next)
+ break;
+
+ pg = pg->next;
+ }
+
+ return 0;
+}
+
static int __init notrace ftrace_shutdown_init(void)
{
struct task_struct *p;
int ret;
- ret = ftrace_shutdown_arch_init();
+ ret = ftrace_dyn_table_alloc();
if (ret)
return ret;