Merge tag 'md/4.12-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 699c5bc51a9219d664578e4d7978dee94d291c82..7367e0ec6f8188973a0ee63539cd9a8485e47585 100644
 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
 
 
-/*
- * Some oddball architectures like 64bit powerpc have function descriptors
- * so this must be overridable.
- */
-#ifndef kprobe_lookup_name
-#define kprobe_lookup_name(name, addr) \
-       addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
-#endif
-
 static int kprobes_initialized;
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
@@ -81,6 +72,12 @@ static struct {
        raw_spinlock_t lock ____cacheline_aligned_in_smp;
 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
 
+kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
+                                       unsigned int __unused)
+{
+       return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
+}
+
 static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
 {
        return &(kretprobe_table_locks[hash].lock);
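Note for anyone following the conversion: with kprobe_lookup_name() now a __weak function instead of a macro, an architecture whose ABI uses function descriptors (the 64-bit powerpc case the removed comment alluded to) can simply provide a strong definition. A rough sketch of what such an override could look like, assuming dereference_function_descriptor() is the appropriate way to reach the code entry point on that architecture:

kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
	/* Resolve the symbol through kallsyms as before... */
	unsigned long addr = kallsyms_lookup_name(name);

	if (!addr)
		return NULL;

	/*
	 * ...then strip the function descriptor so the probe lands on the
	 * actual code entry point rather than on the descriptor itself.
	 */
	return (kprobe_opcode_t *)dereference_function_descriptor((void *)addr);
}

This is only an illustration of the hook the weak symbol opens up, not the actual powerpc implementation.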
@@ -746,13 +743,20 @@ static void kill_optimized_kprobe(struct kprobe *p)
        arch_remove_optimized_kprobe(op);
 }
 
+static inline
+void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
+{
+       if (!kprobe_ftrace(p))
+               arch_prepare_optimized_kprobe(op, p);
+}
+
 /* Try to prepare optimized instructions */
 static void prepare_optimized_kprobe(struct kprobe *p)
 {
        struct optimized_kprobe *op;
 
        op = container_of(p, struct optimized_kprobe, kp);
-       arch_prepare_optimized_kprobe(op, p);
+       __prepare_optimized_kprobe(op, p);
 }
 
 /* Allocate new optimized_kprobe and try to prepare optimized instructions */
@@ -766,7 +770,7 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 
        INIT_LIST_HEAD(&op->list);
        op->kp.addr = p->addr;
-       arch_prepare_optimized_kprobe(op, p);
+       __prepare_optimized_kprobe(op, p);
 
        return &op->kp;
 }
@@ -1391,21 +1395,19 @@ bool within_kprobe_blacklist(unsigned long addr)
  * This returns an encoded error if the symbol cannot be looked up or if an
  * invalid combination of parameters is given.
  */
-static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
+static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
+                       const char *symbol_name, unsigned int offset)
 {
-       kprobe_opcode_t *addr = p->addr;
-
-       if ((p->symbol_name && p->addr) ||
-           (!p->symbol_name && !p->addr))
+       if ((symbol_name && addr) || (!symbol_name && !addr))
                goto invalid;
 
-       if (p->symbol_name) {
-               kprobe_lookup_name(p->symbol_name, addr);
+       if (symbol_name) {
+               addr = kprobe_lookup_name(symbol_name, offset);
                if (!addr)
                        return ERR_PTR(-ENOENT);
        }
 
-       addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
+       addr = (kprobe_opcode_t *)(((char *)addr) + offset);
        if (addr)
                return addr;
 
@@ -1413,6 +1415,11 @@ invalid:
        return ERR_PTR(-EINVAL);
 }
 
+static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
+{
+       return _kprobe_addr(p->addr, p->symbol_name, p->offset);
+}
+
 /* Check passed kprobe is valid and return kprobe in kprobe_table. */
 static struct kprobe *__get_valid_kprobe(struct kprobe *p)
 {
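With the lookup factored out into _kprobe_addr(), code in this file can resolve a probe location from either a raw address or a symbol-name/offset pair without first filling in a struct kprobe. A hedged usage sketch (the symbol name is arbitrary, chosen only for illustration):

/* Resolve by symbol + offset; addr must be NULL in this case... */
kprobe_opcode_t *addr = _kprobe_addr(NULL, "do_sys_open", 0x4);

/*
 * ...or by raw address, in which case symbol_name must be NULL:
 *	addr = _kprobe_addr(p->addr, NULL, p->offset);
 */
if (IS_ERR(addr))
	return PTR_ERR(addr);	/* -EINVAL for a bad combination, -ENOENT if the lookup failed */

The two inputs are mutually exclusive; supplying both, or neither, is what trips the -EINVAL path above.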
@@ -1740,11 +1747,12 @@ void unregister_kprobes(struct kprobe **kps, int num)
 }
 EXPORT_SYMBOL_GPL(unregister_kprobes);
 
-int __weak __kprobes kprobe_exceptions_notify(struct notifier_block *self,
-                                             unsigned long val, void *data)
+int __weak kprobe_exceptions_notify(struct notifier_block *self,
+                                       unsigned long val, void *data)
 {
        return NOTIFY_DONE;
 }
+NOKPROBE_SYMBOL(kprobe_exceptions_notify);
 
 static struct notifier_block kprobe_exceptions_nb = {
        .notifier_call = kprobe_exceptions_notify,
@@ -1875,6 +1883,25 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(pre_handler_kretprobe);
 
+bool __weak arch_function_offset_within_entry(unsigned long offset)
+{
+       return !offset;
+}
+
+bool function_offset_within_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
+{
+       kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
+
+       if (IS_ERR(kp_addr))
+               return false;
+
+       if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
+                                               !arch_function_offset_within_entry(offset))
+               return false;
+
+       return true;
+}
+
 int register_kretprobe(struct kretprobe *rp)
 {
        int ret = 0;
@@ -1882,6 +1909,9 @@ int register_kretprobe(struct kretprobe *rp)
        int i;
        void *addr;
 
+       if (!function_offset_within_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
+               return -EINVAL;
+
        if (kretprobe_blacklist_size) {
                addr = kprobe_addr(&rp->kp);
                if (IS_ERR(addr))
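For context, here is how the new entry check bites at registration time: a kretprobe whose symbol/offset pair does not resolve to a function entry is now refused with -EINVAL before any breakpoint is planted. A minimal usage sketch (module boilerplate omitted; the target symbol and handler body are illustrative only, not taken from this patch):

#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

/* Runs when the probed function returns; print its return value. */
static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("probed function returned %lx\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe my_kretprobe = {
	.kp.symbol_name	= "do_sys_open",	/* hypothetical target */
	.kp.offset	= 0,			/* must land on the function entry */
	.handler	= my_ret_handler,
	.maxactive	= 20,
};

static int __init my_probe_init(void)
{
	int ret = register_kretprobe(&my_kretprobe);

	/* -EINVAL here if kp.offset did not fall within the function entry */
	if (ret < 0)
		pr_err("register_kretprobe failed: %d\n", ret);
	return ret;
}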
@@ -2192,8 +2222,8 @@ static int __init init_kprobes(void)
        if (kretprobe_blacklist_size) {
                /* lookup the function address from its name */
                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
-                       kprobe_lookup_name(kretprobe_blacklist[i].name,
-                                          kretprobe_blacklist[i].addr);
+                       kretprobe_blacklist[i].addr =
+                               kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
                        if (!kretprobe_blacklist[i].addr)
                                printk("kretprobe: lookup failed: %s\n",
                                       kretprobe_blacklist[i].name);