/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/ftrace_event.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/fips.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
 */
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif

/*
 * Given BASE and SIZE this macro calculates the number of pages the
 * memory region occupies
 */
#define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ?	\
		(PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) -	\
			 PFN_DOWN((unsigned long)(BASE)) + 1)	\
		: (0UL))
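
/*
 * Worked example (editorial note, not part of the original source):
 * with 4 KiB pages, BASE = 0x10000ffc and SIZE = 8 straddles a page
 * boundary and so spans the two pages at PFNs 0x10000 and 0x10001:
 *
 *   PFN_DOWN(0x10000ffc + 8 - 1) - PFN_DOWN(0x10000ffc) + 1
 *     = 0x10001 - 0x10000 + 1 = 2 pages
 *
 * A SIZE of 0 short-circuits to 0UL, so an empty region never counts
 * the page that BASE happens to point into.
 */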

/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
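
/*
 * Illustration (editorial note, not part of the original source): on a
 * 32-bit build INIT_OFFSET_MASK is 0x80000000.  layout_sections() below
 * tags an init-section offset such as 0x40 as (0x40 | INIT_OFFSET_MASK);
 * the consumer (move_module(), later in this file) checks the high bit
 * and recovers the plain offset with (sh_entsize & ~INIT_OFFSET_MASK).
 */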

/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete uses stop_machine/add uses RCU list operations). */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */

#ifdef CONFIG_MODULE_SIG
#ifdef CONFIG_MODULE_SIG_FORCE
static bool sig_enforce = true;
#else
static bool sig_enforce = false;

static int param_set_bool_enable_only(const char *val,
				      const struct kernel_param *kp)
{
	int err;
	bool test;
	struct kernel_param dummy_kp = *kp;

	dummy_kp.arg = &test;

	err = param_set_bool(val, &dummy_kp);
	if (err)
		return err;

	/* Don't let them unset it once it's set! */
	if (!test && sig_enforce)
		return -EROFS;

	if (test)
		sig_enforce = true;
	return 0;
}

static const struct kernel_param_ops param_ops_bool_enable_only = {
	.set = param_set_bool_enable_only,
	.get = param_get_bool,
};
#define param_check_bool_enable_only param_check_bool

module_param(sig_enforce, bool_enable_only, 0644);
#endif	/* !CONFIG_MODULE_SIG_FORCE */
#endif	/* CONFIG_MODULE_SIG */
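
/*
 * Editorial sketch (not part of the original source): the "enable only"
 * param above latches on.  Writing "1" to what should surface as
 * /sys/module/module/parameters/sig_enforce sticks; a later "0" is
 * refused with -EROFS.  Hypothetical in-kernel illustration:
 */
#if 0
static void sig_enforce_example(void)
{
	const struct kernel_param kp = {
		.name = "sig_enforce",
		.ops  = &param_ops_bool_enable_only,
		.arg  = &sig_enforce,
	};

	param_set_bool_enable_only("1", &kp);	/* returns 0, latches true */
	param_set_bool_enable_only("0", &kp);	/* returns -EROFS, stays true */
}
#endif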

/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

/* Bounds of module allocation, for speeding __module_address.
 * Protected by module_mutex. */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);

struct load_info {
	Elf_Ehdr *hdr;
	unsigned long len;
	Elf_Shdr *sechdrs;
	char *secstrings, *strtab;
	unsigned long symoffs, stroffs;
	struct _ddebug *debug;
	unsigned int num_debug;
	bool sig_ok;
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;
};

/* We require a truly strong try_module_get(): 0 means failure due to
   ongoing or failed initialization etc. */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{
	add_taint(flag, lockdep_ok);
	mod->taints |= (1U << flag);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);
	do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
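
/*
 * Editorial usage sketch (assumption based on how these helpers are used
 * later in this file): find_module_sections() pulls a typed array out of
 * a named ELF section in one call each, e.g.
 */
#if 0
	mod->kp = section_objs(info, "__param",
			       sizeof(*mod->kp), &mod->num_kp);
	mod->syms = section_objs(info, "__ksymtab",
				 sizeof(*mod->syms), &mod->num_syms);
#endif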

/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab[];
extern const unsigned long __start___kcrctab_gpl[];
extern const unsigned long __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      void *data),
				   void *data)
{
	unsigned int j;

	for (j = 0; j < arrsize; j++) {
		if (fn(&arr[j], owner, data))
			return true;
	}

	return false;
}

/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
				    struct module *owner,
				    void *data),
			 void *data)
{
	struct module *mod;
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif
	};

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	list_for_each_entry_rcu(mod, &modules, list) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif
		};

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(each_symbol_section);

struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};

static bool check_symbol(const struct symsearch *syms,
			 struct module *owner,
			 unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			printk(KERN_WARNING "Symbol %s is being used "
			       "by a non-GPL module, which will not "
			       "be allowed in the future\n", fsa->name);
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
		       "however this module is using it.\n", fsa->name);
		printk(KERN_WARNING
		       "This symbol will go away in the future.\n");
		printk(KERN_WARNING
		       "Please evaluate if this is the right API to use and "
		       "if it really is, submit a report to the linux kernel "
		       "mailing list together with submitting your code for "
		       "inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}

static int cmp_name(const void *va, const void *vb)
{
	const char *a;
	const struct kernel_symbol *b;
	a = va; b = vb;
	return strcmp(a, b->name);
}

static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   void *data)
{
	struct find_symbol_arg *fsa = data;
	struct kernel_symbol *sym;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
			sizeof(struct kernel_symbol), cmp_name);

	if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
		return true;

	return false;
}

/* Find a symbol and return it, along with (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const unsigned long **crc,
					bool gplok,
					bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol_section(find_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	pr_debug("Failed to find symbol %s\n", name);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);

/* Search for module by name: must hold module_mutex. */
static struct module *find_module_all(const char *name,
				      bool even_unformed)
{
	struct module *mod;

	list_for_each_entry(mod, &modules, list) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (strcmp(mod->name, name) == 0)
			return mod;
	}
	return NULL;
}

struct module *find_module(const char *name)
{
	return find_module_all(name, false);
}
EXPORT_SYMBOL_GPL(find_module);

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod,
			   unsigned long size, unsigned long align)
{
	if (align > PAGE_SIZE) {
		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
		       mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(size, align);
	if (!mod->percpu) {
		printk(KERN_WARNING
		       "%s: Could not allocate %lu bytes percpu data\n",
		       mod->name, size);
		return -ENOMEM;
	}
	mod->percpu_size = size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);

			if ((void *)addr >= start &&
			    (void *)addr < start + mod->percpu_size) {
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static inline int percpu_modalloc(struct module *mod,
				  unsigned long size, unsigned long align)
{
	return -ENOMEM;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return sprintf(buffer, "%s\n", mk->mod->field);               \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
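
/*
 * Editorial note (not part of the original source): each MODINFO_ATTR()
 * instantiation above token-pastes the field name into a family of
 * helpers.  MODINFO_ATTR(version), for example, generates roughly:
 */
#if 0
static void setup_modinfo_version(struct module *mod, const char *s);
static ssize_t show_modinfo_version(struct module_attribute *mattr,
				    struct module_kobject *mk, char *buffer);
static int modinfo_version_exists(struct module *mod);
static void free_modinfo_version(struct module *mod);
static struct module_attribute modinfo_version;	/* backs /sys/module/<name>/version */
#endif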

static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	mod->refptr = alloc_percpu(struct module_ref);
	if (!mod->refptr)
		return -ENOMEM;

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	__this_cpu_write(mod->refptr->incs, 1);
	/* Backwards compatibility macros put refcount during init. */
	mod->waiter = current;

	return 0;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a) {
			pr_debug("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		printk(KERN_WARNING "%s: out of memory loading\n", a->name);
		return -ENOMEM;
	}

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}

/* Module a uses b: caller needs module_mutex() */
int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ref_module);

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);

	free_percpu(mod->refptr);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

struct stopref
{
	struct module *mod;
	int flags;
	int *forced;
};

/* Whole machine is stopped with interrupts off when this runs. */
static int __try_stop_module(void *_sref)
{
	struct stopref *sref = _sref;

	/* If it's not unused, quit unless we're forcing. */
	if (module_refcount(sref->mod) != 0) {
		if (!(*sref->forced = try_force_unload(sref->flags)))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	sref->mod->state = MODULE_STATE_GOING;
	return 0;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	if (flags & O_NONBLOCK) {
		struct stopref sref = { mod, flags, forced };

		return stop_machine(__try_stop_module, &sref, NULL);
	} else {
		/* We don't need to stop the machine for this. */
		mod->state = MODULE_STATE_GOING;
		synchronize_sched();
		return 0;
	}
}

unsigned long module_refcount(struct module *mod)
{
	unsigned long incs = 0, decs = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		decs += per_cpu_ptr(mod->refptr, cpu)->decs;
	/*
	 * ensure the incs are added up after the decs.
	 * module_put ensures incs are visible before decs with smp_wmb.
	 *
	 * This 2-count scheme avoids the situation where the refcount
	 * for CPU0 is read, then CPU0 increments the module refcount,
	 * then CPU1 drops that refcount, then the refcount for CPU1 is
	 * read. We would record a decrement but not its corresponding
	 * increment so we would see a low count (disaster).
	 *
	 * Rare situation? But module_refcount can be preempted, and we
	 * might be tallying up 4096+ CPUs. So it is not impossible.
	 */
	smp_rmb();
	for_each_possible_cpu(cpu)
		incs += per_cpu_ptr(mod->refptr, cpu)->incs;
	return incs - decs;
}
EXPORT_SYMBOL(module_refcount);

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

static void wait_for_zero_refcount(struct module *mod)
{
	/* Since we might sleep for some time, release the mutex first */
	mutex_unlock(&module_mutex);
	for (;;) {
		pr_debug("Looking at refcount...\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (module_refcount(mod) == 0)
			break;
		schedule();
	}
	current->state = TASK_RUNNING;
	mutex_lock(&module_mutex);
}

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count and wake up
                   waiter --RR */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Set this up before setting mod->state */
	mod->waiter = current;

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	/* Never wait if forced. */
	if (!forced && module_refcount(mod) != 0)
		wait_for_zero_refcount(mod);

	mutex_unlock(&module_mutex);
	/* Final destruction, now that no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}
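
/*
 * Editorial usage sketch (not part of the original source): userspace
 * reaches this syscall via rmmod/modprobe -r.  A direct call looks
 * roughly like the snippet below; O_NONBLOCK selects the stop_machine()
 * path in try_stop_module() above, and O_TRUNC (with
 * CONFIG_MODULE_FORCE_UNLOAD) forces removal.
 */
#if 0
#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>

	long err = syscall(SYS_delete_module, "example_mod", O_NONBLOCK);
#endif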

static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	struct module_use *use;
	int printed_something = 0;

	seq_printf(m, " %lu ", module_refcount(mod));

	/* Always include a trailing , so userspace can differentiate
	   between this and the old multi-field proc format. */
	list_for_each_entry(use, &mod->source_list, source_list) {
		printed_something = 1;
		seq_printf(m, "%s,", use->source->name);
	}

	if (mod->init != NULL && mod->exit == NULL) {
		printed_something = 1;
		seq_printf(m, "[permanent],");
	}

	if (!printed_something)
		seq_printf(m, "-");
}

void __symbol_put(const char *symbol)
{
	struct module *owner;

	preempt_disable();
	if (!find_symbol(symbol, &owner, NULL, true, false))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/* module_text_address is safe here: we're supposed to have reference
	 * to module from symbol_get, so it can't go away. */
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%lu\n", module_refcount(mk->mod));
}

static struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
	if (module) {
		preempt_disable();
		__this_cpu_inc(module->refptr->incs);
		trace_module_get(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		preempt_disable();

		if (likely(module_is_live(module))) {
			__this_cpu_inc(module->refptr->incs);
			trace_module_get(module, _RET_IP_);
		} else
			ret = false;

		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);

void module_put(struct module *module)
{
	if (module) {
		preempt_disable();
		smp_wmb(); /* see comment in module_refcount */
		__this_cpu_inc(module->refptr->decs);

		trace_module_put(module, _RET_IP_);
		/* Maybe they're waiting for us to drop reference? */
		if (unlikely(!module_is_live(module)))
			wake_up_process(module->waiter);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);

#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_printf(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(ref_module);

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */

static size_t module_flags_taint(struct module *mod, char *buf)
{
	size_t l = 0;

	if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
		buf[l++] = 'P';
	if (mod->taints & (1 << TAINT_OOT_MODULE))
		buf[l++] = 'O';
	if (mod->taints & (1 << TAINT_FORCED_MODULE))
		buf[l++] = 'F';
	if (mod->taints & (1 << TAINT_CRAP))
		buf[l++] = 'C';
	/*
	 * TAINT_FORCED_RMMOD: could be added.
	 * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
	 * apply to modules.
	 */
	return l;
}
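
/*
 * Editorial example (not part of the original source): the flags
 * concatenate in the order tested above, so a proprietary, out-of-tree
 * module that also needed a forced load reports the taint string "POF"
 * through /proc/modules and the per-module sysfs taint attribute.
 */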

static ssize_t show_initstate(struct module_attribute *mattr,
			      struct module_kobject *mk, char *buffer)
{
	const char *state = "unknown";

	switch (mk->mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	default:
		BUG();
	}
	return sprintf(buffer, "%s\n", state);
}

static struct module_attribute modinfo_initstate =
	__ATTR(initstate, 0444, show_initstate, NULL);

static ssize_t store_uevent(struct module_attribute *mattr,
			    struct module_kobject *mk,
			    const char *buffer, size_t count)
{
	enum kobject_action action;

	if (kobject_action_type(buffer, count, &action) == 0)
		kobject_uevent(&mk->kobj, action);
	return count;
}

struct module_attribute module_uevent =
	__ATTR(uevent, 0200, NULL, store_uevent);

static ssize_t show_coresize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->core_size);
}

static struct module_attribute modinfo_coresize =
	__ATTR(coresize, 0444, show_coresize, NULL);

static ssize_t show_initsize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->init_size);
}

static struct module_attribute modinfo_initsize =
	__ATTR(initsize, 0444, show_initsize, NULL);

static ssize_t show_taint(struct module_attribute *mattr,
			  struct module_kobject *mk, char *buffer)
{
	size_t l;

	l = module_flags_taint(mk->mod, buffer);
	buffer[l++] = '\n';
	return l;
}

static struct module_attribute modinfo_taint =
	__ATTR(taint, 0444, show_taint, NULL);

static struct module_attribute *modinfo_attrs[] = {
	&module_uevent,
	&modinfo_version,
	&modinfo_srcversion,
	&modinfo_initstate,
	&modinfo_coresize,
	&modinfo_initsize,
	&modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
	&modinfo_refcnt,
#endif
	NULL,
};

static const char vermagic[] = VERMAGIC_STRING;

static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		printk(KERN_WARNING "%s: %s: kernel tainted.\n",
		       mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
	return 0;
#else
	return -ENOEXEC;
#endif
}

#ifdef CONFIG_MODVERSIONS
/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
static unsigned long maybe_relocated(unsigned long crc,
				     const struct module *crc_owner)
{
#ifdef ARCH_RELOCATES_KCRCTAB
	if (crc_owner == NULL)
		return crc - (unsigned long)reloc_start;
#endif
	return crc;
}

static int check_version(Elf_Shdr *sechdrs,
			 unsigned int versindex,
			 const char *symname,
			 struct module *mod,
			 const unsigned long *crc,
			 const struct module *crc_owner)
{
	unsigned int i, num_versions;
	struct modversion_info *versions;

	/* Exporting module didn't supply crcs?  OK, we're already tainted. */
	if (!crc)
		return 1;

	/* No versions at all?  modprobe --force does this. */
	if (versindex == 0)
		return try_to_force_load(mod, symname) == 0;

	versions = (void *) sechdrs[versindex].sh_addr;
	num_versions = sechdrs[versindex].sh_size
		/ sizeof(struct modversion_info);

	for (i = 0; i < num_versions; i++) {
		if (strcmp(versions[i].name, symname) != 0)
			continue;

		if (versions[i].crc == maybe_relocated(*crc, crc_owner))
			return 1;
		pr_debug("Found checksum %lX vs module %lX\n",
			 maybe_relocated(*crc, crc_owner), versions[i].crc);
		goto bad_version;
	}

	printk(KERN_WARNING "%s: no symbol version for %s\n",
	       mod->name, symname);
	return 0;

bad_version:
	printk("%s: disagrees about version of symbol %s\n",
	       mod->name, symname);
	return 0;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	const unsigned long *crc;

	/* Since this should be found in kernel (which can't be removed),
	 * no locking is necessary. */
	if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
			 &crc, true, false))
		BUG();
	return check_version(sechdrs, versindex,
			     VMLINUX_SYMBOL_STR(module_layout), mod, crc,
			     NULL);
}

/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	if (has_crcs) {
		amagic += strcspn(amagic, " ");
		bmagic += strcspn(bmagic, " ");
	}
	return strcmp(amagic, bmagic) == 0;
}
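
/*
 * Editorial example (not part of the original source; version strings
 * hypothetical): with CRCs present, strcspn() advances both pointers to
 * the first space, so
 *
 *   "3.10.77 SMP preempt mod_unload ARMv7 "
 *   "3.10.79 SMP preempt mod_unload ARMv7 "
 *
 * compare equal (only the feature flags after the release string must
 * match), while without CRCs the full strings, kernel release included,
 * must be identical.
 */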
#else
static inline int check_version(Elf_Shdr *sechdrs,
				unsigned int versindex,
				const char *symname,
				struct module *mod,
				const unsigned long *crc,
				const struct module *crc_owner)
{
	return 1;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */

/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
						  const char *name,
						  char ownername[])
{
	struct module *owner;
	const struct kernel_symbol *sym;
	const unsigned long *crc;
	int err;

	mutex_lock(&module_mutex);
	sym = find_symbol(name, &owner, &crc,
			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
	if (!sym)
		goto unlock;

	if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
			   owner)) {
		sym = ERR_PTR(-EINVAL);
		goto getname;
	}

	err = ref_module(mod, owner);
	if (err) {
		sym = ERR_PTR(err);
		goto getname;
	}

getname:
	/* We must make copy under the lock if we failed to get ref. */
	strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
unlock:
	mutex_unlock(&module_mutex);
	return sym;
}

static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
		    const char *name)
{
	const struct kernel_symbol *ksym;
	char owner[MODULE_NAME_LEN];

	if (wait_event_interruptible_timeout(module_wq,
			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
			|| PTR_ERR(ksym) != -EBUSY,
					     30 * HZ) <= 0) {
		printk(KERN_WARNING "%s: gave up waiting for init of module %s.\n",
		       mod->name, owner);
	}
	return ksym;
}

/*
 * /sys/module/foo/sections stuff
 * J. Corbet <corbet@lwn.net>
 */
#ifdef CONFIG_SYSFS

#ifdef CONFIG_KALLSYMS
static inline bool sect_empty(const Elf_Shdr *sect)
{
	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}

struct module_sect_attr
{
	struct module_attribute mattr;
	char *name;
	unsigned long address;
};

struct module_sect_attrs
{
	struct attribute_group grp;
	unsigned int nsections;
	struct module_sect_attr attrs[0];
};

static ssize_t module_sect_show(struct module_attribute *mattr,
				struct module_kobject *mk, char *buf)
{
	struct module_sect_attr *sattr =
		container_of(mattr, struct module_sect_attr, mattr);
	return sprintf(buf, "0x%pK\n", (void *)sattr->address);
}

static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
{
	unsigned int section;

	for (section = 0; section < sect_attrs->nsections; section++)
		kfree(sect_attrs->attrs[section].name);
	kfree(sect_attrs);
}

static void add_sect_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int nloaded = 0, i, size[2];
	struct module_sect_attrs *sect_attrs;
	struct module_sect_attr *sattr;
	struct attribute **gattr;

	/* Count loaded sections and allocate structures */
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]))
			nloaded++;
	size[0] = ALIGN(sizeof(*sect_attrs)
			+ nloaded * sizeof(sect_attrs->attrs[0]),
			sizeof(sect_attrs->grp.attrs[0]));
	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
	if (sect_attrs == NULL)
		return;

	/* Setup section attributes. */
	sect_attrs->grp.name = "sections";
	sect_attrs->grp.attrs = (void *)sect_attrs + size[0];

	sect_attrs->nsections = 0;
	sattr = &sect_attrs->attrs[0];
	gattr = &sect_attrs->grp.attrs[0];
	for (i = 0; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *sec = &info->sechdrs[i];
		if (sect_empty(sec))
			continue;
		sattr->address = sec->sh_addr;
		sattr->name = kstrdup(info->secstrings + sec->sh_name,
					GFP_KERNEL);
		if (sattr->name == NULL)
			goto out;
		sect_attrs->nsections++;
		sysfs_attr_init(&sattr->mattr.attr);
		sattr->mattr.show = module_sect_show;
		sattr->mattr.store = NULL;
		sattr->mattr.attr.name = sattr->name;
		sattr->mattr.attr.mode = S_IRUGO;
		*(gattr++) = &(sattr++)->mattr.attr;
	}
	*gattr = NULL;

	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
		goto out;

	mod->sect_attrs = sect_attrs;
	return;
out:
	free_sect_attrs(sect_attrs);
}

static void remove_sect_attrs(struct module *mod)
{
	if (mod->sect_attrs) {
		sysfs_remove_group(&mod->mkobj.kobj,
				   &mod->sect_attrs->grp);
		/* We are positive that no one is using any sect attrs
		 * at this point.  Deallocate immediately. */
		free_sect_attrs(mod->sect_attrs);
		mod->sect_attrs = NULL;
	}
}

/*
 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
 */

struct module_notes_attrs {
	struct kobject *dir;
	unsigned int notes;
	struct bin_attribute attrs[0];
};

static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t pos, size_t count)
{
	/*
	 * The caller checked the pos and count against our size.
	 */
	memcpy(buf, bin_attr->private + pos, count);
	return count;
}

static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
			     unsigned int i)
{
	if (notes_attrs->dir) {
		while (i-- > 0)
			sysfs_remove_bin_file(notes_attrs->dir,
					      &notes_attrs->attrs[i]);
		kobject_put(notes_attrs->dir);
	}
	kfree(notes_attrs);
}

static void add_notes_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int notes, loaded, i;
	struct module_notes_attrs *notes_attrs;
	struct bin_attribute *nattr;

	/* failed to create section attributes, so can't create notes */
	if (!mod->sect_attrs)
		return;

	/* Count notes sections and allocate structures.  */
	notes = 0;
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]) &&
		    (info->sechdrs[i].sh_type == SHT_NOTE))
			++notes;

	if (notes == 0)
		return;

	notes_attrs = kzalloc(sizeof(*notes_attrs)
			      + notes * sizeof(notes_attrs->attrs[0]),
			      GFP_KERNEL);
	if (notes_attrs == NULL)
		return;

	notes_attrs->notes = notes;
	nattr = &notes_attrs->attrs[0];
	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
		if (sect_empty(&info->sechdrs[i]))
			continue;
		if (info->sechdrs[i].sh_type == SHT_NOTE) {
			sysfs_bin_attr_init(nattr);
			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
			nattr->attr.mode = S_IRUGO;
			nattr->size = info->sechdrs[i].sh_size;
			nattr->private = (void *) info->sechdrs[i].sh_addr;
			nattr->read = module_notes_read;
			++nattr;
		}
		++loaded;
	}

	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
	if (!notes_attrs->dir)
		goto out;

	for (i = 0; i < notes; ++i)
		if (sysfs_create_bin_file(notes_attrs->dir,
					  &notes_attrs->attrs[i]))
			goto out;

	mod->notes_attrs = notes_attrs;
	return;

out:
	free_notes_attrs(notes_attrs, i);
}

static void remove_notes_attrs(struct module *mod)
{
	if (mod->notes_attrs)
		free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
}

#else

static inline void add_sect_attrs(struct module *mod,
				  const struct load_info *info)
{
}

static inline void remove_sect_attrs(struct module *mod)
{
}

static inline void add_notes_attrs(struct module *mod,
				   const struct load_info *info)
{
}

static inline void remove_notes_attrs(struct module *mod)
{
}
#endif /* CONFIG_KALLSYMS */

static void add_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;
	int nowarn;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list) {
		nowarn = sysfs_create_link(use->target->holders_dir,
					   &mod->mkobj.kobj, mod->name);
	}
	mutex_unlock(&module_mutex);
#endif
}

static void del_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list)
		sysfs_remove_link(use->target->holders_dir, mod->name);
	mutex_unlock(&module_mutex);
#endif
}

static int module_add_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	struct module_attribute *temp_attr;
	int error = 0;
	int i;

	mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
					(ARRAY_SIZE(modinfo_attrs) + 1)),
					GFP_KERNEL);
	if (!mod->modinfo_attrs)
		return -ENOMEM;

	temp_attr = mod->modinfo_attrs;
	for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
		if (!attr->test ||
		    (attr->test && attr->test(mod))) {
			memcpy(temp_attr, attr, sizeof(*temp_attr));
			sysfs_attr_init(&temp_attr->attr);
			error = sysfs_create_file(&mod->mkobj.kobj, &temp_attr->attr);
			++temp_attr;
		}
	}
	return error;
}

static void module_remove_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
		/* pick a field to test for end of list */
		if (!attr->attr.name)
			break;
		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
		if (attr->free)
			attr->free(mod);
	}
	kfree(mod->modinfo_attrs);
}

static int mod_sysfs_init(struct module *mod)
{
	int err;
	struct kobject *kobj;

	if (!module_sysfs_initialized) {
		printk(KERN_ERR "%s: module sysfs not initialized\n",
		       mod->name);
		err = -EINVAL;
		goto out;
	}

	kobj = kset_find_obj(module_kset, mod->name);
	if (kobj) {
		printk(KERN_ERR "%s: module is already loaded\n", mod->name);
		kobject_put(kobj);
		err = -EINVAL;
		goto out;
	}

	mod->mkobj.mod = mod;

	memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
	mod->mkobj.kobj.kset = module_kset;
	err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
				   "%s", mod->name);
	if (err)
		kobject_put(&mod->mkobj.kobj);

	/* delay uevent until full sysfs population */
out:
	return err;
}

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	int err;

	err = mod_sysfs_init(mod);
	if (err)
		goto out;

	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
	if (!mod->holders_dir) {
		err = -ENOMEM;
		goto out_unreg;
	}

	err = module_param_sysfs_setup(mod, kparam, num_params);
	if (err)
		goto out_unreg_holders;

	err = module_add_modinfo_attrs(mod);
	if (err)
		goto out_unreg_param;

	add_usage_links(mod);
	add_sect_attrs(mod, info);
	add_notes_attrs(mod, info);

	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
	return 0;

out_unreg_param:
	module_param_sysfs_remove(mod);
out_unreg_holders:
	kobject_put(mod->holders_dir);
out_unreg:
	kobject_put(&mod->mkobj.kobj);
out:
	return err;
}

static void mod_sysfs_fini(struct module *mod)
{
	remove_notes_attrs(mod);
	remove_sect_attrs(mod);
	kobject_put(&mod->mkobj.kobj);
}

#else /* !CONFIG_SYSFS */

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	return 0;
}

static void mod_sysfs_fini(struct module *mod)
{
}

static void module_remove_modinfo_attrs(struct module *mod)
{
}

static void del_usage_links(struct module *mod)
{
}

#endif /* CONFIG_SYSFS */

static void mod_sysfs_teardown(struct module *mod)
{
	del_usage_links(mod);
	module_remove_modinfo_attrs(mod);
	module_param_sysfs_remove(mod);
	kobject_put(mod->mkobj.drivers_dir);
	kobject_put(mod->holders_dir);
	mod_sysfs_fini(mod);
}

/*
 * Unlink the module while the whole machine is stopped with interrupts off
 *  - this defends against kallsyms not taking locks
 */
static int __unlink_module(void *_mod)
{
	struct module *mod = _mod;
	list_del(&mod->list);
	module_bug_cleanup(mod);
	return 0;
}

#ifdef CONFIG_DEBUG_SET_MODULE_RONX
/*
 * LKM RO/NX protection: protect module's text/ro-data
 * from modification and any data from execution.
 */
void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages))
{
	unsigned long begin_pfn = PFN_DOWN((unsigned long)start);
	unsigned long end_pfn = PFN_DOWN((unsigned long)end);

	if (end_pfn > begin_pfn)
		set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
}

static void set_section_ro_nx(void *base,
			unsigned long text_size,
			unsigned long ro_size,
			unsigned long total_size)
{
	/* begin and end PFNs of the current subsection */
	unsigned long begin_pfn;
	unsigned long end_pfn;

	/*
	 * Set RO for module text and RO-data:
	 * - Always protect first page.
	 * - Do not protect last partial page.
	 */
	if (ro_size > 0)
		set_page_attributes(base, base + ro_size, set_memory_ro);

	/*
	 * Set NX permissions for module data:
	 * - Do not protect first partial page.
	 * - Always protect last page.
	 */
	if (total_size > text_size) {
		begin_pfn = PFN_UP((unsigned long)base + text_size);
		end_pfn = PFN_UP((unsigned long)base + total_size);
		if (end_pfn > begin_pfn)
			set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
	}
}

static void unset_module_core_ro_nx(struct module *mod)
{
	set_page_attributes(mod->module_core + mod->core_text_size,
		mod->module_core + mod->core_size,
		set_memory_x);
	set_page_attributes(mod->module_core,
		mod->module_core + mod->core_ro_size,
		set_memory_rw);
}

static void unset_module_init_ro_nx(struct module *mod)
{
	set_page_attributes(mod->module_init + mod->init_text_size,
		mod->module_init + mod->init_size,
		set_memory_x);
	set_page_attributes(mod->module_init,
		mod->module_init + mod->init_ro_size,
		set_memory_rw);
}

/* Iterate through all modules and set each module's text as RW */
void set_all_modules_text_rw(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if ((mod->module_core) && (mod->core_text_size)) {
			set_page_attributes(mod->module_core,
						mod->module_core + mod->core_text_size,
						set_memory_rw);
		}
		if ((mod->module_init) && (mod->init_text_size)) {
			set_page_attributes(mod->module_init,
						mod->module_init + mod->init_text_size,
						set_memory_rw);
		}
	}
	mutex_unlock(&module_mutex);
}

/* Iterate through all modules and set each module's text as RO */
void set_all_modules_text_ro(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if ((mod->module_core) && (mod->core_text_size)) {
			set_page_attributes(mod->module_core,
						mod->module_core + mod->core_text_size,
						set_memory_ro);
		}
		if ((mod->module_init) && (mod->init_text_size)) {
			set_page_attributes(mod->module_init,
						mod->module_init + mod->init_text_size,
						set_memory_ro);
		}
	}
	mutex_unlock(&module_mutex);
}
#else
static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
static void unset_module_core_ro_nx(struct module *mod) { }
static void unset_module_init_ro_nx(struct module *mod) { }
#endif

void __weak module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
}

void __weak module_arch_cleanup(struct module *mod)
{
}

/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
	trace_module_free(mod);

	mod_sysfs_teardown(mod);

	/* We leave it in list to prevent duplicate loads, but make sure
	 * that no one uses it while it's being deconstructed. */
1869 mutex_lock(&module_mutex);
1870 mod->state = MODULE_STATE_UNFORMED;
1871 mutex_unlock(&module_mutex);
1872
1873 /* Remove dynamic debug info */
1874 ddebug_remove_module(mod->name);
1875
1876 /* Arch-specific cleanup. */
1877 module_arch_cleanup(mod);
1878
1879 /* Module unload stuff */
1880 module_unload_free(mod);
1881
1882 /* Free any allocated parameters. */
1883 destroy_params(mod->kp, mod->num_kp);
1884
1885 /* Now we can delete it from the lists */
1886 mutex_lock(&module_mutex);
1887 stop_machine(__unlink_module, mod, NULL);
1888 mutex_unlock(&module_mutex);
1889
1890 /* This may be NULL, but that's OK */
1891 unset_module_init_ro_nx(mod);
1892 module_free(mod, mod->module_init);
1893 kfree(mod->args);
1894 percpu_modfree(mod);
1895
1896 /* Free lock-classes: */
1897 lockdep_free_key_range(mod->module_core, mod->core_size);
1898
1899 /* Finally, free the core (containing the module structure) */
1900 unset_module_core_ro_nx(mod);
1901 module_free(mod, mod->module_core);
1902
1903 #ifdef CONFIG_MPU
1904 update_protections(current->mm);
1905 #endif
1906 }
1907
1908 void *__symbol_get(const char *symbol)
1909 {
1910 struct module *owner;
1911 const struct kernel_symbol *sym;
1912
1913 preempt_disable();
1914 sym = find_symbol(symbol, &owner, NULL, true, true);
1915 if (sym && strong_try_module_get(owner))
1916 sym = NULL;
1917 preempt_enable();
1918
1919 return sym ? (void *)sym->value : NULL;
1920 }
1921 EXPORT_SYMBOL_GPL(__symbol_get);
1922
1923 /*
1924 * Ensure that an exported symbol [global namespace] does not already exist
1925 * in the kernel or in some other module's exported symbol table.
1926 *
1927 * You must hold the module_mutex.
1928 */
1929 static int verify_export_symbols(struct module *mod)
1930 {
1931 unsigned int i;
1932 struct module *owner;
1933 const struct kernel_symbol *s;
1934 struct {
1935 const struct kernel_symbol *sym;
1936 unsigned int num;
1937 } arr[] = {
1938 { mod->syms, mod->num_syms },
1939 { mod->gpl_syms, mod->num_gpl_syms },
1940 { mod->gpl_future_syms, mod->num_gpl_future_syms },
1941 #ifdef CONFIG_UNUSED_SYMBOLS
1942 { mod->unused_syms, mod->num_unused_syms },
1943 { mod->unused_gpl_syms, mod->num_unused_gpl_syms },
1944 #endif
1945 };
1946
1947 for (i = 0; i < ARRAY_SIZE(arr); i++) {
1948 for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
1949 if (find_symbol(s->name, &owner, NULL, true, false)) {
1950 printk(KERN_ERR
1951 "%s: exports duplicate symbol %s"
1952 " (owned by %s)\n",
1953 mod->name, s->name, module_name(owner));
1954 return -ENOEXEC;
1955 }
1956 }
1957 }
1958 return 0;
1959 }
1960
1961 /* Change all symbols so that st_value encodes the pointer directly. */
1962 static int simplify_symbols(struct module *mod, const struct load_info *info)
1963 {
1964 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
1965 Elf_Sym *sym = (void *)symsec->sh_addr;
1966 unsigned long secbase;
1967 unsigned int i;
1968 int ret = 0;
1969 const struct kernel_symbol *ksym;
1970
1971 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
1972 const char *name = info->strtab + sym[i].st_name;
1973
1974 switch (sym[i].st_shndx) {
1975 case SHN_COMMON:
1976 /* We compiled with -fno-common. These are not
1977 supposed to happen. */
1978 pr_debug("Common symbol: %s\n", name);
1979 printk("%s: please compile with -fno-common\n",
1980 mod->name);
1981 ret = -ENOEXEC;
1982 break;
1983
1984 case SHN_ABS:
1985 /* Don't need to do anything */
1986 pr_debug("Absolute symbol: 0x%08lx\n",
1987 (long)sym[i].st_value);
1988 break;
1989
1990 case SHN_UNDEF:
1991 ksym = resolve_symbol_wait(mod, info, name);
1992 /* Ok if resolved. */
1993 if (ksym && !IS_ERR(ksym)) {
1994 sym[i].st_value = ksym->value;
1995 break;
1996 }
1997
1998 /* Ok if weak. */
1999 if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
2000 break;
2001
2002 printk(KERN_WARNING "%s: Unknown symbol %s (err %li)\n",
2003 mod->name, name, PTR_ERR(ksym));
2004 ret = PTR_ERR(ksym) ?: -ENOENT;
2005 break;
2006
2007 default:
2008 /* Divert to percpu allocation if a percpu var. */
2009 if (sym[i].st_shndx == info->index.pcpu)
2010 secbase = (unsigned long)mod_percpu(mod);
2011 else
2012 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
2013 sym[i].st_value += secbase;
2014 break;
2015 }
2016 }
2017
2018 return ret;
2019 }
2020
2021 static int apply_relocations(struct module *mod, const struct load_info *info)
2022 {
2023 unsigned int i;
2024 int err = 0;
2025
2026 /* Now do relocations. */
2027 for (i = 1; i < info->hdr->e_shnum; i++) {
2028 unsigned int infosec = info->sechdrs[i].sh_info;
2029
2030 /* Not a valid relocation section? */
2031 if (infosec >= info->hdr->e_shnum)
2032 continue;
2033
2034 /* Don't bother with non-allocated sections */
2035 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
2036 continue;
2037
2038 if (info->sechdrs[i].sh_type == SHT_REL)
2039 err = apply_relocate(info->sechdrs, info->strtab,
2040 info->index.sym, i, mod);
2041 else if (info->sechdrs[i].sh_type == SHT_RELA)
2042 err = apply_relocate_add(info->sechdrs, info->strtab,
2043 info->index.sym, i, mod);
2044 if (err < 0)
2045 break;
2046 }
2047 return err;
2048 }
2049
2050 /* Additional bytes needed by arch in front of individual sections */
2051 unsigned int __weak arch_mod_section_prepend(struct module *mod,
2052 unsigned int section)
2053 {
2054 /* default implementation just returns zero */
2055 return 0;
2056 }
2057
2058 /* Update size with this section: return offset. */
2059 static long get_offset(struct module *mod, unsigned int *size,
2060 Elf_Shdr *sechdr, unsigned int section)
2061 {
2062 long ret;
2063
2064 *size += arch_mod_section_prepend(mod, section);
2065 ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
2066 *size = ret + sechdr->sh_size;
2067 return ret;
2068 }
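/*
 * Worked example with illustrative numbers: if *size == 100, the arch
 * prepend is 0, sh_addralign == 16 and sh_size == 40, the section is
 * placed at ALIGN(100, 16) == 112 and *size becomes 112 + 40 == 152.
 * sh_addralign == 0 is treated as 1-byte alignment (the ?: 1 above).
 */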
2069
2070 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2071 might -- code, read-only data, read-write data, small data. Tally
2072 sizes, and place the offsets into sh_entsize fields: high bit means it
2073 belongs in init. */
2074 static void layout_sections(struct module *mod, struct load_info *info)
2075 {
2076 static unsigned long const masks[][2] = {
2077 /* NOTE: all executable code must be the first section
2078 * in this array; otherwise modify the text_size
2079 * finder in the two loops below */
2080 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
2081 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
2082 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
2083 { ARCH_SHF_SMALL | SHF_ALLOC, 0 }
2084 };
2085 unsigned int m, i;
2086
2087 for (i = 0; i < info->hdr->e_shnum; i++)
2088 info->sechdrs[i].sh_entsize = ~0UL;
2089
2090 pr_debug("Core section allocation order:\n");
2091 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2092 for (i = 0; i < info->hdr->e_shnum; ++i) {
2093 Elf_Shdr *s = &info->sechdrs[i];
2094 const char *sname = info->secstrings + s->sh_name;
2095
2096 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2097 || (s->sh_flags & masks[m][1])
2098 || s->sh_entsize != ~0UL
2099 || strstarts(sname, ".init"))
2100 continue;
2101 s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
2102 pr_debug("\t%s\n", sname);
2103 }
2104 switch (m) {
2105 case 0: /* executable */
2106 mod->core_size = debug_align(mod->core_size);
2107 mod->core_text_size = mod->core_size;
2108 break;
2109 case 1: /* RO: text and ro-data */
2110 mod->core_size = debug_align(mod->core_size);
2111 mod->core_ro_size = mod->core_size;
2112 break;
2113 case 3: /* whole core */
2114 mod->core_size = debug_align(mod->core_size);
2115 break;
2116 }
2117 }
2118
2119 pr_debug("Init section allocation order:\n");
2120 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2121 for (i = 0; i < info->hdr->e_shnum; ++i) {
2122 Elf_Shdr *s = &info->sechdrs[i];
2123 const char *sname = info->secstrings + s->sh_name;
2124
2125 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2126 || (s->sh_flags & masks[m][1])
2127 || s->sh_entsize != ~0UL
2128 || !strstarts(sname, ".init"))
2129 continue;
2130 s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
2131 | INIT_OFFSET_MASK);
2132 pr_debug("\t%s\n", sname);
2133 }
2134 switch (m) {
2135 case 0: /* executable */
2136 mod->init_size = debug_align(mod->init_size);
2137 mod->init_text_size = mod->init_size;
2138 break;
2139 case 1: /* RO: text and ro-data */
2140 mod->init_size = debug_align(mod->init_size);
2141 mod->init_ro_size = mod->init_size;
2142 break;
2143 case 3: /* whole init */
2144 mod->init_size = debug_align(mod->init_size);
2145 break;
2146 }
2147 }
2148 }
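/*
 * Illustrative result of the passes above: pass 0 claims SHF_EXECINSTR
 * sections such as .text, pass 1 the remaining read-only ones such as
 * .rodata, pass 2 writable ones such as .data and .bss, and pass 3 any
 * ARCH_SHF_SMALL sections (e.g. .sdata on arches that define it), with
 * .init.* sections laid out the same way in the init region.
 */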
2149
2150 static void set_license(struct module *mod, const char *license)
2151 {
2152 if (!license)
2153 license = "unspecified";
2154
2155 if (!license_is_gpl_compatible(license)) {
2156 if (!test_taint(TAINT_PROPRIETARY_MODULE))
2157 printk(KERN_WARNING "%s: module license '%s' taints "
2158 "kernel.\n", mod->name, license);
2159 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2160 LOCKDEP_NOW_UNRELIABLE);
2161 }
2162 }
2163
2164 /* Parse tag=value strings from .modinfo section */
2165 static char *next_string(char *string, unsigned long *secsize)
2166 {
2167 /* Skip non-zero chars */
2168 while (string[0]) {
2169 string++;
2170 if ((*secsize)-- <= 1)
2171 return NULL;
2172 }
2173
2174 /* Skip any zero padding. */
2175 while (!string[0]) {
2176 string++;
2177 if ((*secsize)-- <= 1)
2178 return NULL;
2179 }
2180 return string;
2181 }
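/*
 * Example .modinfo contents (values made up for illustration):
 *
 *	"license=GPL\0author=Jane Doe\0\0vermagic=3.10.77 SMP mod_unload "
 *
 * next_string() advances from one NUL-terminated tag=value entry to the
 * next, skipping padding NULs, and returns NULL once secsize runs out;
 * get_modinfo() below then matches on the "tag=" prefix.
 */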
2182
2183 static char *get_modinfo(struct load_info *info, const char *tag)
2184 {
2185 char *p;
2186 unsigned int taglen = strlen(tag);
2187 Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2188 unsigned long size = infosec->sh_size;
2189
2190 for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
2191 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2192 return p + taglen + 1;
2193 }
2194 return NULL;
2195 }
2196
2197 static void setup_modinfo(struct module *mod, struct load_info *info)
2198 {
2199 struct module_attribute *attr;
2200 int i;
2201
2202 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2203 if (attr->setup)
2204 attr->setup(mod, get_modinfo(info, attr->attr.name));
2205 }
2206 }
2207
2208 static void free_modinfo(struct module *mod)
2209 {
2210 struct module_attribute *attr;
2211 int i;
2212
2213 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2214 if (attr->free)
2215 attr->free(mod);
2216 }
2217 }
2218
2219 #ifdef CONFIG_KALLSYMS
2220
2221 /* lookup symbol in given range of kernel_symbols */
2222 static const struct kernel_symbol *lookup_symbol(const char *name,
2223 const struct kernel_symbol *start,
2224 const struct kernel_symbol *stop)
2225 {
2226 return bsearch(name, start, stop - start,
2227 sizeof(struct kernel_symbol), cmp_name);
2228 }
2229
2230 static int is_exported(const char *name, unsigned long value,
2231 const struct module *mod)
2232 {
2233 const struct kernel_symbol *ks;
2234 if (!mod)
2235 ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2236 else
2237 ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2238 return ks != NULL && ks->value == value;
2239 }
2240
2241 /* As per nm */
2242 static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2243 {
2244 const Elf_Shdr *sechdrs = info->sechdrs;
2245
2246 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2247 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2248 return 'v';
2249 else
2250 return 'w';
2251 }
2252 if (sym->st_shndx == SHN_UNDEF)
2253 return 'U';
2254 if (sym->st_shndx == SHN_ABS)
2255 return 'a';
2256 if (sym->st_shndx >= SHN_LORESERVE)
2257 return '?';
2258 if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2259 return 't';
2260 if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2261 && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2262 if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2263 return 'r';
2264 else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2265 return 'g';
2266 else
2267 return 'd';
2268 }
2269 if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2270 if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2271 return 's';
2272 else
2273 return 'b';
2274 }
2275 if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2276 ".debug")) {
2277 return 'n';
2278 }
2279 return '?';
2280 }
2281
2282 static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2283 unsigned int shnum)
2284 {
2285 const Elf_Shdr *sec;
2286
2287 if (src->st_shndx == SHN_UNDEF
2288 || src->st_shndx >= shnum
2289 || !src->st_name)
2290 return false;
2291
2292 sec = sechdrs + src->st_shndx;
2293 if (!(sec->sh_flags & SHF_ALLOC)
2294 #ifndef CONFIG_KALLSYMS_ALL
2295 || !(sec->sh_flags & SHF_EXECINSTR)
2296 #endif
2297 || (sec->sh_entsize & INIT_OFFSET_MASK))
2298 return false;
2299
2300 return true;
2301 }
2302
2303 /*
2304 * We only allocate and copy the strings needed by the parts of symtab
2305 * we keep. This is simple, but has the effect of making multiple
2306 * copies of duplicates. We could be more sophisticated, see
2307 * linux-kernel thread starting with
2308 * <73defb5e4bca04a6431392cc341112b1@localhost>.
2309 */
2310 static void layout_symtab(struct module *mod, struct load_info *info)
2311 {
2312 Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2313 Elf_Shdr *strsect = info->sechdrs + info->index.str;
2314 const Elf_Sym *src;
2315 unsigned int i, nsrc, ndst, strtab_size = 0;
2316
2317 /* Put symbol section at end of init part of module. */
2318 symsect->sh_flags |= SHF_ALLOC;
2319 symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
2320 info->index.sym) | INIT_OFFSET_MASK;
2321 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
2322
2323 src = (void *)info->hdr + symsect->sh_offset;
2324 nsrc = symsect->sh_size / sizeof(*src);
2325
2326 /* Compute total space required for the core symbols' strtab. */
2327 for (ndst = i = 0; i < nsrc; i++) {
2328 if (i == 0 ||
2329 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2330 strtab_size += strlen(&info->strtab[src[i].st_name])+1;
2331 ndst++;
2332 }
2333 }
2334
2335 /* Append room for core symbols at end of core part. */
2336 info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
2337 info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
2338 mod->core_size += strtab_size;
2339
2340 /* Put string table section at end of init part of module. */
2341 strsect->sh_flags |= SHF_ALLOC;
2342 strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
2343 info->index.str) | INIT_OFFSET_MASK;
2344 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2345 }
2346
2347 static void add_kallsyms(struct module *mod, const struct load_info *info)
2348 {
2349 unsigned int i, ndst;
2350 const Elf_Sym *src;
2351 Elf_Sym *dst;
2352 char *s;
2353 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2354
2355 mod->symtab = (void *)symsec->sh_addr;
2356 mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2357 /* Make sure we get permanent strtab: don't use info->strtab. */
2358 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2359
2360 /* Set types up while we still have access to sections. */
2361 for (i = 0; i < mod->num_symtab; i++)
2362 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
2363
2364 mod->core_symtab = dst = mod->module_core + info->symoffs;
2365 mod->core_strtab = s = mod->module_core + info->stroffs;
2366 src = mod->symtab;
2367 for (ndst = i = 0; i < mod->num_symtab; i++) {
2368 if (i == 0 ||
2369 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2370 dst[ndst] = src[i];
2371 dst[ndst++].st_name = s - mod->core_strtab;
2372 s += strlcpy(s, &mod->strtab[src[i].st_name],
2373 KSYM_NAME_LEN) + 1;
2374 }
2375 }
2376 mod->core_num_syms = ndst;
2377 }
2378 #else
2379 static inline void layout_symtab(struct module *mod, struct load_info *info)
2380 {
2381 }
2382
2383 static void add_kallsyms(struct module *mod, const struct load_info *info)
2384 {
2385 }
2386 #endif /* CONFIG_KALLSYMS */
2387
2388 static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
2389 {
2390 if (!debug)
2391 return;
2392 #ifdef CONFIG_DYNAMIC_DEBUG
2393 if (ddebug_add_module(debug, num, debug->modname))
2394 printk(KERN_ERR "dynamic debug error adding module: %s\n",
2395 debug->modname);
2396 #endif
2397 }
2398
2399 static void dynamic_debug_remove(struct _ddebug *debug)
2400 {
2401 if (debug)
2402 ddebug_remove_module(debug->modname);
2403 }
2404
2405 void * __weak module_alloc(unsigned long size)
2406 {
2407 return vmalloc_exec(size);
2408 }
2409
2410 static void *module_alloc_update_bounds(unsigned long size)
2411 {
2412 void *ret = module_alloc(size);
2413
2414 if (ret) {
2415 mutex_lock(&module_mutex);
2416 /* Update module bounds. */
2417 if ((unsigned long)ret < module_addr_min)
2418 module_addr_min = (unsigned long)ret;
2419 if ((unsigned long)ret + size > module_addr_max)
2420 module_addr_max = (unsigned long)ret + size;
2421 mutex_unlock(&module_mutex);
2422 }
2423 return ret;
2424 }
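/*
 * The [module_addr_min, module_addr_max] bounds maintained here let
 * __module_address() (further below) reject most non-module addresses
 * with two comparisons before walking the module list at all.
 */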
2425
2426 #ifdef CONFIG_DEBUG_KMEMLEAK
2427 static void kmemleak_load_module(const struct module *mod,
2428 const struct load_info *info)
2429 {
2430 unsigned int i;
2431
2432 /* only scan the sections containing data */
2433 kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2434
2435 for (i = 1; i < info->hdr->e_shnum; i++) {
2436 /* Scan all writable sections that are not executable */
2437 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
2438 !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
2439 (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
2440 continue;
2441
2442 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2443 info->sechdrs[i].sh_size, GFP_KERNEL);
2444 }
2445 }
2446 #else
2447 static inline void kmemleak_load_module(const struct module *mod,
2448 const struct load_info *info)
2449 {
2450 }
2451 #endif
2452
2453 #ifdef CONFIG_MODULE_SIG
2454 static int module_sig_check(struct load_info *info)
2455 {
2456 int err = -ENOKEY;
2457 const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2458 const void *mod = info->hdr;
2459
2460 if (info->len > markerlen &&
2461 memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
2462 /* We truncate the module to discard the signature */
2463 info->len -= markerlen;
2464 err = mod_verify_sig(mod, &info->len);
2465 }
2466
2467 if (!err) {
2468 info->sig_ok = true;
2469 return 0;
2470 }
2471
2472 /* Not having a signature is only an error if we're strict. */
2473 if (err < 0 && fips_enabled)
2474 panic("Module verification failed with error %d in FIPS mode\n",
2475 err);
2476 if (err == -ENOKEY && !sig_enforce)
2477 err = 0;
2478
2479 return err;
2480 }
2481 #else /* !CONFIG_MODULE_SIG */
2482 static int module_sig_check(struct load_info *info)
2483 {
2484 return 0;
2485 }
2486 #endif /* !CONFIG_MODULE_SIG */
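/*
 * For reference: a signed image is the plain module followed by the raw
 * signature and the MODULE_SIG_STRING marker ("~Module signature
 * appended~\n"), which is why module_sig_check() can detect the
 * signature by comparing the tail of the image and then shrink
 * info->len to strip it.
 */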
2487
2488 /* Sanity checks against invalid binaries, wrong arch, weird elf version. */
2489 static int elf_header_check(struct load_info *info)
2490 {
2491 if (info->len < sizeof(*(info->hdr)))
2492 return -ENOEXEC;
2493
2494 if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
2495 || info->hdr->e_type != ET_REL
2496 || !elf_check_arch(info->hdr)
2497 || info->hdr->e_shentsize != sizeof(Elf_Shdr))
2498 return -ENOEXEC;
2499
2500 if (info->hdr->e_shoff >= info->len
2501 || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
2502 info->len - info->hdr->e_shoff))
2503 return -ENOEXEC;
2504
2505 return 0;
2506 }
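/*
 * Note the section-table bound is checked as
 * e_shnum * sizeof(Elf_Shdr) > info->len - e_shoff, with e_shoff
 * already known to be < info->len, rather than summing e_shoff and the
 * table size; a huge e_shoff in a malformed image therefore cannot
 * wrap the arithmetic into an accepting comparison.
 */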
2507
2508 /* Sets info->hdr and info->len. */
2509 static int copy_module_from_user(const void __user *umod, unsigned long len,
2510 struct load_info *info)
2511 {
2512 int err;
2513
2514 info->len = len;
2515 if (info->len < sizeof(*(info->hdr)))
2516 return -ENOEXEC;
2517
2518 err = security_kernel_module_from_file(NULL);
2519 if (err)
2520 return err;
2521
2522 /* Suck in entire file: we'll want most of it. */
2523 info->hdr = vmalloc(info->len);
2524 if (!info->hdr)
2525 return -ENOMEM;
2526
2527 if (copy_from_user(info->hdr, umod, info->len) != 0) {
2528 vfree(info->hdr);
2529 return -EFAULT;
2530 }
2531
2532 return 0;
2533 }
2534
2535 /* Sets info->hdr and info->len. */
2536 static int copy_module_from_fd(int fd, struct load_info *info)
2537 {
2538 struct file *file;
2539 int err;
2540 struct kstat stat;
2541 loff_t pos;
2542 ssize_t bytes = 0;
2543
2544 file = fget(fd);
2545 if (!file)
2546 return -ENOEXEC;
2547
2548 err = security_kernel_module_from_file(file);
2549 if (err)
2550 goto out;
2551
2552 err = vfs_getattr(&file->f_path, &stat);
2553 if (err)
2554 goto out;
2555
2556 if (stat.size > INT_MAX) {
2557 err = -EFBIG;
2558 goto out;
2559 }
2560
2561 /* Don't hand 0 to vmalloc, it whines. */
2562 if (stat.size == 0) {
2563 err = -EINVAL;
2564 goto out;
2565 }
2566
2567 info->hdr = vmalloc(stat.size);
2568 if (!info->hdr) {
2569 err = -ENOMEM;
2570 goto out;
2571 }
2572
2573 pos = 0;
2574 while (pos < stat.size) {
2575 bytes = kernel_read(file, pos, (char *)(info->hdr) + pos,
2576 stat.size - pos);
2577 if (bytes < 0) {
2578 vfree(info->hdr);
2579 err = bytes;
2580 goto out;
2581 }
2582 if (bytes == 0)
2583 break;
2584 pos += bytes;
2585 }
2586 info->len = pos;
2587
2588 out:
2589 fput(file);
2590 return err;
2591 }
2592
2593 static void free_copy(struct load_info *info)
2594 {
2595 vfree(info->hdr);
2596 }
2597
2598 static int rewrite_section_headers(struct load_info *info, int flags)
2599 {
2600 unsigned int i;
2601
2602 /* This should always be true, but let's be sure. */
2603 info->sechdrs[0].sh_addr = 0;
2604
2605 for (i = 1; i < info->hdr->e_shnum; i++) {
2606 Elf_Shdr *shdr = &info->sechdrs[i];
2607 if (shdr->sh_type != SHT_NOBITS
2608 && info->len < shdr->sh_offset + shdr->sh_size) {
2609 printk(KERN_ERR "Module len %lu truncated\n",
2610 info->len);
2611 return -ENOEXEC;
2612 }
2613
2614 /* Mark each section's sh_addr with its address in the
2615 temporary image. */
2616 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
2617
2618 #ifndef CONFIG_MODULE_UNLOAD
2619 /* Don't load .exit sections */
2620 if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
2621 shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
2622 #endif
2623 }
2624
2625 /* Track but don't keep modinfo and version sections. */
2626 if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
2627 info->index.vers = 0; /* Pretend no __versions section! */
2628 else
2629 info->index.vers = find_sec(info, "__versions");
2630 info->index.info = find_sec(info, ".modinfo");
2631 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2632 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
2633 return 0;
2634 }
2635
2636 /*
2637 * Set up our basic convenience variables (pointers to section headers,
2638 * search for module section index etc), and do some basic section
2639 * verification.
2640 *
2641 * Return the temporary module pointer (we'll replace it with the final
2642 * one when we move the module sections around).
2643 */
2644 static struct module *setup_load_info(struct load_info *info, int flags)
2645 {
2646 unsigned int i;
2647 int err;
2648 struct module *mod;
2649
2650 /* Set up the convenience variables */
2651 info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
2652 info->secstrings = (void *)info->hdr
2653 + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
2654
2655 err = rewrite_section_headers(info, flags);
2656 if (err)
2657 return ERR_PTR(err);
2658
2659 /* Find internal symbols and strings. */
2660 for (i = 1; i < info->hdr->e_shnum; i++) {
2661 if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
2662 info->index.sym = i;
2663 info->index.str = info->sechdrs[i].sh_link;
2664 info->strtab = (char *)info->hdr
2665 + info->sechdrs[info->index.str].sh_offset;
2666 break;
2667 }
2668 }
2669
2670 info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
2671 if (!info->index.mod) {
2672 printk(KERN_WARNING "No module found in object\n");
2673 return ERR_PTR(-ENOEXEC);
2674 }
2675 /* This is temporary: point mod into copy of data. */
2676 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2677
2678 if (info->index.sym == 0) {
2679 printk(KERN_WARNING "%s: module has no symbols (stripped?)\n",
2680 mod->name);
2681 return ERR_PTR(-ENOEXEC);
2682 }
2683
2684 info->index.pcpu = find_pcpusec(info);
2685
2686 /* Check module struct version now, before we try to use module. */
2687 if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
2688 return ERR_PTR(-ENOEXEC);
2689
2690 return mod;
2691 }
2692
2693 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
2694 {
2695 const char *modmagic = get_modinfo(info, "vermagic");
2696 int err;
2697
2698 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
2699 modmagic = NULL;
2700
2701 /* This is allowed: modprobe --force will invalidate it. */
2702 if (!modmagic) {
2703 err = try_to_force_load(mod, "bad vermagic");
2704 if (err)
2705 return err;
2706 } else if (!same_magic(modmagic, vermagic, info->index.vers)) {
2707 printk(KERN_ERR "%s: version magic '%s' should be '%s'\n",
2708 mod->name, modmagic, vermagic);
2709 return -ENOEXEC;
2710 }
2711
2712 if (!get_modinfo(info, "intree"))
2713 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
2714
2715 if (get_modinfo(info, "staging")) {
2716 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
2717 printk(KERN_WARNING "%s: module is from the staging directory,"
2718 " the quality is unknown, you have been warned.\n",
2719 mod->name);
2720 }
2721
2722 /* Set up license info based on the info section */
2723 set_license(mod, get_modinfo(info, "license"));
2724
2725 return 0;
2726 }
2727
2728 static void find_module_sections(struct module *mod, struct load_info *info)
2729 {
2730 mod->kp = section_objs(info, "__param",
2731 sizeof(*mod->kp), &mod->num_kp);
2732 mod->syms = section_objs(info, "__ksymtab",
2733 sizeof(*mod->syms), &mod->num_syms);
2734 mod->crcs = section_addr(info, "__kcrctab");
2735 mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
2736 sizeof(*mod->gpl_syms),
2737 &mod->num_gpl_syms);
2738 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
2739 mod->gpl_future_syms = section_objs(info,
2740 "__ksymtab_gpl_future",
2741 sizeof(*mod->gpl_future_syms),
2742 &mod->num_gpl_future_syms);
2743 mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
2744
2745 #ifdef CONFIG_UNUSED_SYMBOLS
2746 mod->unused_syms = section_objs(info, "__ksymtab_unused",
2747 sizeof(*mod->unused_syms),
2748 &mod->num_unused_syms);
2749 mod->unused_crcs = section_addr(info, "__kcrctab_unused");
2750 mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
2751 sizeof(*mod->unused_gpl_syms),
2752 &mod->num_unused_gpl_syms);
2753 mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
2754 #endif
2755 #ifdef CONFIG_CONSTRUCTORS
2756 mod->ctors = section_objs(info, ".ctors",
2757 sizeof(*mod->ctors), &mod->num_ctors);
2758 #endif
2759
2760 #ifdef CONFIG_TRACEPOINTS
2761 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
2762 sizeof(*mod->tracepoints_ptrs),
2763 &mod->num_tracepoints);
2764 #endif
2765 #ifdef HAVE_JUMP_LABEL
2766 mod->jump_entries = section_objs(info, "__jump_table",
2767 sizeof(*mod->jump_entries),
2768 &mod->num_jump_entries);
2769 #endif
2770 #ifdef CONFIG_EVENT_TRACING
2771 mod->trace_events = section_objs(info, "_ftrace_events",
2772 sizeof(*mod->trace_events),
2773 &mod->num_trace_events);
2774 #endif
2775 #ifdef CONFIG_TRACING
2776 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
2777 sizeof(*mod->trace_bprintk_fmt_start),
2778 &mod->num_trace_bprintk_fmt);
2779 #endif
2780 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
2781 /* sechdrs[0].sh_size is always zero */
2782 mod->ftrace_callsites = section_objs(info, "__mcount_loc",
2783 sizeof(*mod->ftrace_callsites),
2784 &mod->num_ftrace_callsites);
2785 #endif
2786
2787 mod->extable = section_objs(info, "__ex_table",
2788 sizeof(*mod->extable), &mod->num_exentries);
2789
2790 if (section_addr(info, "__obsparm"))
2791 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
2792 mod->name);
2793
2794 info->debug = section_objs(info, "__verbose",
2795 sizeof(*info->debug), &info->num_debug);
2796 }
2797
2798 static int move_module(struct module *mod, struct load_info *info)
2799 {
2800 int i;
2801 void *ptr;
2802
2803 /* Do the allocs. */
2804 ptr = module_alloc_update_bounds(mod->core_size);
2805 /*
2806 * The pointer to this block is stored in the module structure
2807 * which is inside the block. Just mark it as not being a
2808 * leak.
2809 */
2810 kmemleak_not_leak(ptr);
2811 if (!ptr)
2812 return -ENOMEM;
2813
2814 memset(ptr, 0, mod->core_size);
2815 mod->module_core = ptr;
2816
2817 if (mod->init_size) {
2818 ptr = module_alloc_update_bounds(mod->init_size);
2819 /*
2820 * The pointer to this block is stored in the module structure
2821 * which is inside the block. This block doesn't need to be
2822 * scanned as it contains data and code that will be freed
2823 * after the module is initialized.
2824 */
2825 kmemleak_ignore(ptr);
2826 if (!ptr) {
2827 module_free(mod, mod->module_core);
2828 return -ENOMEM;
2829 }
2830 memset(ptr, 0, mod->init_size);
2831 mod->module_init = ptr;
2832 } else
2833 mod->module_init = NULL;
2834
2835 /* Transfer each section which specifies SHF_ALLOC */
2836 pr_debug("final section addresses:\n");
2837 for (i = 0; i < info->hdr->e_shnum; i++) {
2838 void *dest;
2839 Elf_Shdr *shdr = &info->sechdrs[i];
2840
2841 if (!(shdr->sh_flags & SHF_ALLOC))
2842 continue;
2843
2844 if (shdr->sh_entsize & INIT_OFFSET_MASK)
2845 dest = mod->module_init
2846 + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
2847 else
2848 dest = mod->module_core + shdr->sh_entsize;
2849
2850 if (shdr->sh_type != SHT_NOBITS)
2851 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
2852 /* Update sh_addr to point to copy in image. */
2853 shdr->sh_addr = (unsigned long)dest;
2854 pr_debug("\t0x%lx %s\n",
2855 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
2856 }
2857
2858 return 0;
2859 }
2860
2861 static int check_module_license_and_versions(struct module *mod)
2862 {
2863 /*
2864 * ndiswrapper is under GPL by itself, but loads proprietary modules.
2865 * Don't use add_taint_module(), as it would prevent ndiswrapper from
2866 * using GPL-only symbols it needs.
2867 */
2868 if (strcmp(mod->name, "ndiswrapper") == 0)
2869 add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
2870
2871 /* driverloader was caught wrongly pretending to be under GPL */
2872 if (strcmp(mod->name, "driverloader") == 0)
2873 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2874 LOCKDEP_NOW_UNRELIABLE);
2875
2876 /* lve claims to be GPL but upstream won't provide source */
2877 if (strcmp(mod->name, "lve") == 0)
2878 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2879 LOCKDEP_NOW_UNRELIABLE);
2880
2881 #ifdef CONFIG_MODVERSIONS
2882 if ((mod->num_syms && !mod->crcs)
2883 || (mod->num_gpl_syms && !mod->gpl_crcs)
2884 || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
2885 #ifdef CONFIG_UNUSED_SYMBOLS
2886 || (mod->num_unused_syms && !mod->unused_crcs)
2887 || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
2888 #endif
2889 ) {
2890 return try_to_force_load(mod,
2891 "no versions for exported symbols");
2892 }
2893 #endif
2894 return 0;
2895 }
2896
2897 static void flush_module_icache(const struct module *mod)
2898 {
2899 mm_segment_t old_fs;
2900
2901 /* flush the icache in correct context */
2902 old_fs = get_fs();
2903 set_fs(KERNEL_DS);
2904
2905 /*
2906 * Flush the instruction cache, since we've played with text.
2907 * Do it before processing of module parameters, so the module
2908 * can provide parameter accessor functions of its own.
2909 */
2910 if (mod->module_init)
2911 flush_icache_range((unsigned long)mod->module_init,
2912 (unsigned long)mod->module_init
2913 + mod->init_size);
2914 flush_icache_range((unsigned long)mod->module_core,
2915 (unsigned long)mod->module_core + mod->core_size);
2916
2917 set_fs(old_fs);
2918 }
2919
2920 int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
2921 Elf_Shdr *sechdrs,
2922 char *secstrings,
2923 struct module *mod)
2924 {
2925 return 0;
2926 }
2927
2928 static struct module *layout_and_allocate(struct load_info *info, int flags)
2929 {
2930 /* Module within temporary copy. */
2931 struct module *mod;
2932 int err;
2933
2934 mod = setup_load_info(info, flags);
2935 if (IS_ERR(mod))
2936 return mod;
2937
2938 err = check_modinfo(mod, info, flags);
2939 if (err)
2940 return ERR_PTR(err);
2941
2942 /* Allow arches to frob section contents and sizes. */
2943 err = module_frob_arch_sections(info->hdr, info->sechdrs,
2944 info->secstrings, mod);
2945 if (err < 0)
2946 return ERR_PTR(err);
2947
2948 /* We will do a special allocation for per-cpu sections later. */
2949 info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
2950
2951 /* Determine total sizes, and put offsets in sh_entsize. For now
2952 this is done generically; there don't appear to be any
2953 special cases for the architectures. */
2954 layout_sections(mod, info);
2955 layout_symtab(mod, info);
2956
2957 /* Allocate and move to the final place */
2958 err = move_module(mod, info);
2959 if (err)
2960 return ERR_PTR(err);
2961
2962 /* Module has been copied to its final place now: return it. */
2963 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2964 kmemleak_load_module(mod, info);
2965 return mod;
2966 }
2967
2968 static int alloc_module_percpu(struct module *mod, struct load_info *info)
2969 {
2970 Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
2971 if (!pcpusec->sh_size)
2972 return 0;
2973
2974 /* We have a special allocation for this section. */
2975 return percpu_modalloc(mod, pcpusec->sh_size, pcpusec->sh_addralign);
2976 }
2977
2978 /* mod is no longer valid after this! */
2979 static void module_deallocate(struct module *mod, struct load_info *info)
2980 {
2981 percpu_modfree(mod);
2982 module_free(mod, mod->module_init);
2983 module_free(mod, mod->module_core);
2984 }
2985
2986 int __weak module_finalize(const Elf_Ehdr *hdr,
2987 const Elf_Shdr *sechdrs,
2988 struct module *me)
2989 {
2990 return 0;
2991 }
2992
2993 static int post_relocation(struct module *mod, const struct load_info *info)
2994 {
2995 /* Sort exception table now relocations are done. */
2996 sort_extable(mod->extable, mod->extable + mod->num_exentries);
2997
2998 /* Copy relocated percpu area over. */
2999 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
3000 info->sechdrs[info->index.pcpu].sh_size);
3001
3002 /* Setup kallsyms-specific fields. */
3003 add_kallsyms(mod, info);
3004
3005 /* Arch-specific module finalizing. */
3006 return module_finalize(info->hdr, info->sechdrs, mod);
3007 }
3008
3009 /* Is the module with this name done loading? No locks held. */
3010 static bool finished_loading(const char *name)
3011 {
3012 struct module *mod;
3013 bool ret;
3014
3015 mutex_lock(&module_mutex);
3016 mod = find_module_all(name, true);
3017 ret = !mod || mod->state == MODULE_STATE_LIVE
3018 || mod->state == MODULE_STATE_GOING;
3019 mutex_unlock(&module_mutex);
3020
3021 return ret;
3022 }
3023
3024 /* Call module constructors. */
3025 static void do_mod_ctors(struct module *mod)
3026 {
3027 #ifdef CONFIG_CONSTRUCTORS
3028 unsigned long i;
3029
3030 for (i = 0; i < mod->num_ctors; i++)
3031 mod->ctors[i]();
3032 #endif
3033 }
3034
3035 /* This is where the real work happens */
3036 static int do_init_module(struct module *mod)
3037 {
3038 int ret = 0;
3039
3040 /*
3041 * We want to find out whether @mod uses async during init. Clear
3042 * PF_USED_ASYNC. async_schedule*() will set it.
3043 */
3044 current->flags &= ~PF_USED_ASYNC;
3045
3046 blocking_notifier_call_chain(&module_notify_list,
3047 MODULE_STATE_COMING, mod);
3048
3049 /* Set RO and NX regions for core */
3050 set_section_ro_nx(mod->module_core,
3051 mod->core_text_size,
3052 mod->core_ro_size,
3053 mod->core_size);
3054
3055 /* Set RO and NX regions for init */
3056 set_section_ro_nx(mod->module_init,
3057 mod->init_text_size,
3058 mod->init_ro_size,
3059 mod->init_size);
3060
3061 do_mod_ctors(mod);
3062 /* Start the module */
3063 if (mod->init != NULL)
3064 ret = do_one_initcall(mod->init);
3065 if (ret < 0) {
3066 /* Init routine failed: abort. Try to protect us from
3067 buggy refcounters. */
3068 mod->state = MODULE_STATE_GOING;
3069 synchronize_sched();
3070 module_put(mod);
3071 blocking_notifier_call_chain(&module_notify_list,
3072 MODULE_STATE_GOING, mod);
3073 free_module(mod);
3074 wake_up_all(&module_wq);
3075 return ret;
3076 }
3077 if (ret > 0) {
3078 printk(KERN_WARNING
3079 "%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n"
3080 "%s: loading module anyway...\n",
3081 __func__, mod->name, ret,
3082 __func__);
3083 dump_stack();
3084 }
3085
3086 /* Now it's a first class citizen! */
3087 mod->state = MODULE_STATE_LIVE;
3088 blocking_notifier_call_chain(&module_notify_list,
3089 MODULE_STATE_LIVE, mod);
3090
3091 /*
3092 * We need to finish all async code before the module init sequence
3093 * is done. This has potential to deadlock. For example, a newly
3094 * detected block device can trigger request_module() of the
3095 * default iosched from async probing task. Once userland helper
3096 * reaches here, async_synchronize_full() will wait on the async
3097 * task waiting on request_module() and deadlock.
3098 *
3099 * This deadlock is avoided by performing async_synchronize_full()
3100 * iff module init queued any async jobs. This isn't a full
3101 * solution as it will still deadlock if module loading from
3102 * async jobs nests more than once; however, due to the various
3103 * constraints, this hack seems to be the best option for now.
3104 * Please refer to the following thread for details.
3105 *
3106 * http://thread.gmane.org/gmane.linux.kernel/1420814
3107 */
3108 if (current->flags & PF_USED_ASYNC)
3109 async_synchronize_full();
3110
3111 mutex_lock(&module_mutex);
3112 /* Drop initial reference. */
3113 module_put(mod);
3114 trim_init_extable(mod);
3115 #ifdef CONFIG_KALLSYMS
3116 mod->num_symtab = mod->core_num_syms;
3117 mod->symtab = mod->core_symtab;
3118 mod->strtab = mod->core_strtab;
3119 #endif
3120 unset_module_init_ro_nx(mod);
3121 module_free(mod, mod->module_init);
3122 mod->module_init = NULL;
3123 mod->init_size = 0;
3124 mod->init_ro_size = 0;
3125 mod->init_text_size = 0;
3126 mutex_unlock(&module_mutex);
3127 wake_up_all(&module_wq);
3128
3129 return 0;
3130 }
3131
3132 static int may_init_module(void)
3133 {
3134 if (!capable(CAP_SYS_MODULE) || modules_disabled)
3135 return -EPERM;
3136
3137 return 0;
3138 }
3139
3140 /*
3141 * We try to place it in the list now to make sure it's unique before
3142 * we dedicate too many resources; in particular, this avoids temporary
3143 * percpu memory exhaustion.
3144 */
3145 static int add_unformed_module(struct module *mod)
3146 {
3147 int err;
3148 struct module *old;
3149
3150 mod->state = MODULE_STATE_UNFORMED;
3151
3152 again:
3153 mutex_lock(&module_mutex);
3154 if ((old = find_module_all(mod->name, true)) != NULL) {
3155 if (old->state == MODULE_STATE_COMING
3156 || old->state == MODULE_STATE_UNFORMED) {
3157 /* Wait in case it fails to load. */
3158 mutex_unlock(&module_mutex);
3159 err = wait_event_interruptible(module_wq,
3160 finished_loading(mod->name));
3161 if (err)
3162 goto out_unlocked;
3163 goto again;
3164 }
3165 err = -EEXIST;
3166 goto out;
3167 }
3168 list_add_rcu(&mod->list, &modules);
3169 err = 0;
3170
3171 out:
3172 mutex_unlock(&module_mutex);
3173 out_unlocked:
3174 return err;
3175 }
3176
3177 static int complete_formation(struct module *mod, struct load_info *info)
3178 {
3179 int err;
3180
3181 mutex_lock(&module_mutex);
3182
3183 /* Find duplicate symbols (must be called under lock). */
3184 err = verify_export_symbols(mod);
3185 if (err < 0)
3186 goto out;
3187
3188 /* This relies on module_mutex for list integrity. */
3189 module_bug_finalize(info->hdr, info->sechdrs, mod);
3190
3191 /* Mark state as coming so strong_try_module_get() ignores us,
3192 * but kallsyms etc. can see us. */
3193 mod->state = MODULE_STATE_COMING;
3194
3195 out:
3196 mutex_unlock(&module_mutex);
3197 return err;
3198 }
3199
3200 /* Allocate and load the module: note that size of section 0 is always
3201 zero, and we rely on this for optional sections. */
3202 static int load_module(struct load_info *info, const char __user *uargs,
3203 int flags)
3204 {
3205 struct module *mod;
3206 long err;
3207
3208 err = module_sig_check(info);
3209 if (err)
3210 goto free_copy;
3211
3212 err = elf_header_check(info);
3213 if (err)
3214 goto free_copy;
3215
3216 /* Figure out module layout, and allocate all the memory. */
3217 mod = layout_and_allocate(info, flags);
3218 if (IS_ERR(mod)) {
3219 err = PTR_ERR(mod);
3220 goto free_copy;
3221 }
3222
3223 /* Reserve our place in the list. */
3224 err = add_unformed_module(mod);
3225 if (err)
3226 goto free_module;
3227
3228 #ifdef CONFIG_MODULE_SIG
3229 mod->sig_ok = info->sig_ok;
3230 if (!mod->sig_ok) {
3231 printk_once(KERN_NOTICE
3232 "%s: module verification failed: signature and/or"
3233 " required key missing - tainting kernel\n",
3234 mod->name);
3235 add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_STILL_OK);
3236 }
3237 #endif
3238
3239 /* To avoid stressing percpu allocator, do this once we're unique. */
3240 err = alloc_module_percpu(mod, info);
3241 if (err)
3242 goto unlink_mod;
3243
3244 /* Now module is in final location, initialize linked lists, etc. */
3245 err = module_unload_init(mod);
3246 if (err)
3247 goto unlink_mod;
3248
3249 /* Now that we've got everything in its final location, we can
3250 * find the optional sections. */
3251 find_module_sections(mod, info);
3252
3253 err = check_module_license_and_versions(mod);
3254 if (err)
3255 goto free_unload;
3256
3257 /* Set up MODINFO_ATTR fields */
3258 setup_modinfo(mod, info);
3259
3260 /* Fix up syms, so that st_value is a pointer to location. */
3261 err = simplify_symbols(mod, info);
3262 if (err < 0)
3263 goto free_modinfo;
3264
3265 err = apply_relocations(mod, info);
3266 if (err < 0)
3267 goto free_modinfo;
3268
3269 err = post_relocation(mod, info);
3270 if (err < 0)
3271 goto free_modinfo;
3272
3273 flush_module_icache(mod);
3274
3275 /* Now copy in args */
3276 mod->args = strndup_user(uargs, ~0UL >> 1);
3277 if (IS_ERR(mod->args)) {
3278 err = PTR_ERR(mod->args);
3279 goto free_arch_cleanup;
3280 }
3281
3282 dynamic_debug_setup(info->debug, info->num_debug);
3283
3284 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
3285 ftrace_module_init(mod);
3286
3287 /* Finally it's fully formed, ready to start executing. */
3288 err = complete_formation(mod, info);
3289 if (err)
3290 goto ddebug_cleanup;
3291
3292 /* Module is ready to execute: parsing args may do that. */
3293 err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3294 -32768, 32767, &ddebug_dyndbg_module_param_cb);
3295 if (err < 0)
3296 goto bug_cleanup;
3297
3298 /* Link in to sysfs. */
3299 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3300 if (err < 0)
3301 goto bug_cleanup;
3302
3303 /* Get rid of temporary copy. */
3304 free_copy(info);
3305
3306 /* Done! */
3307 trace_module_load(mod);
3308
3309 return do_init_module(mod);
3310
3311 bug_cleanup:
3312 /* module_bug_cleanup needs module_mutex protection */
3313 mutex_lock(&module_mutex);
3314 module_bug_cleanup(mod);
3315 mutex_unlock(&module_mutex);
3316 ddebug_cleanup:
3317 dynamic_debug_remove(info->debug);
3318 synchronize_sched();
3319 kfree(mod->args);
3320 free_arch_cleanup:
3321 module_arch_cleanup(mod);
3322 free_modinfo:
3323 free_modinfo(mod);
3324 free_unload:
3325 module_unload_free(mod);
3326 unlink_mod:
3327 mutex_lock(&module_mutex);
3328 /* Unlink carefully: kallsyms could be walking list. */
3329 list_del_rcu(&mod->list);
3330 wake_up_all(&module_wq);
3331 mutex_unlock(&module_mutex);
3332 free_module:
3333 module_deallocate(mod, info);
3334 free_copy:
3335 free_copy(info);
3336 return err;
3337 }
3338
3339 SYSCALL_DEFINE3(init_module, void __user *, umod,
3340 unsigned long, len, const char __user *, uargs)
3341 {
3342 int err;
3343 struct load_info info = { };
3344
3345 err = may_init_module();
3346 if (err)
3347 return err;
3348
3349 pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
3350 umod, len, uargs);
3351
3352 err = copy_module_from_user(umod, len, &info);
3353 if (err)
3354 return err;
3355
3356 return load_module(&info, uargs, 0);
3357 }
3358
3359 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
3360 {
3361 int err;
3362 struct load_info info = { };
3363
3364 err = may_init_module();
3365 if (err)
3366 return err;
3367
3368 pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
3369
3370 if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
3371 |MODULE_INIT_IGNORE_VERMAGIC))
3372 return -EINVAL;
3373
3374 err = copy_module_from_fd(fd, &info);
3375 if (err)
3376 return err;
3377
3378 return load_module(&info, uargs, flags);
3379 }
3380
3381 static inline int within(unsigned long addr, void *start, unsigned long size)
3382 {
3383 return ((void *)addr >= start && (void *)addr < start + size);
3384 }
3385
3386 #ifdef CONFIG_KALLSYMS
3387 /*
3388 * This ignores the intensely annoying "mapping symbols" found
3389 * in ARM ELF files: $a, $t and $d.
3390 */
3391 static inline int is_arm_mapping_symbol(const char *str)
3392 {
3393 return str[0] == '$' && strchr("atd", str[1])
3394 && (str[2] == '\0' || str[2] == '.');
3395 }
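/*
 * Examples: "$a" (ARM code), "$t" (Thumb code) and "$d" (data), as well
 * as suffixed forms like "$d.3", are treated as mapping symbols; a name
 * such as "$android" is not, since its third character is neither NUL
 * nor '.'.
 */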
3396
3397 static const char *get_ksymbol(struct module *mod,
3398 unsigned long addr,
3399 unsigned long *size,
3400 unsigned long *offset)
3401 {
3402 unsigned int i, best = 0;
3403 unsigned long nextval;
3404
3405 /* At worst, the next value is at the end of the module */
3406 if (within_module_init(addr, mod))
3407 nextval = (unsigned long)mod->module_init+mod->init_text_size;
3408 else
3409 nextval = (unsigned long)mod->module_core+mod->core_text_size;
3410
3411 /* Scan for closest preceding symbol, and next symbol. (ELF
3412 starts real symbols at 1). */
3413 for (i = 1; i < mod->num_symtab; i++) {
3414 if (mod->symtab[i].st_shndx == SHN_UNDEF)
3415 continue;
3416
3417 /* We ignore unnamed symbols: they're uninformative
3418 * and inserted at a whim. */
3419 if (mod->symtab[i].st_value <= addr
3420 && mod->symtab[i].st_value > mod->symtab[best].st_value
3421 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3422 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3423 best = i;
3424 if (mod->symtab[i].st_value > addr
3425 && mod->symtab[i].st_value < nextval
3426 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3427 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3428 nextval = mod->symtab[i].st_value;
3429 }
3430
3431 if (!best)
3432 return NULL;
3433
3434 if (size)
3435 *size = nextval - mod->symtab[best].st_value;
3436 if (offset)
3437 *offset = addr - mod->symtab[best].st_value;
3438 return mod->strtab + mod->symtab[best].st_name;
3439 }
3440
3441 /* For kallsyms to ask for address resolution. NULL means not found. We
3442 * avoid locking (it could deadlock on oopses) and simply disable preemption. */
3443 const char *module_address_lookup(unsigned long addr,
3444 unsigned long *size,
3445 unsigned long *offset,
3446 char **modname,
3447 char *namebuf)
3448 {
3449 struct module *mod;
3450 const char *ret = NULL;
3451
3452 preempt_disable();
3453 list_for_each_entry_rcu(mod, &modules, list) {
3454 if (mod->state == MODULE_STATE_UNFORMED)
3455 continue;
3456 if (within_module_init(addr, mod) ||
3457 within_module_core(addr, mod)) {
3458 if (modname)
3459 *modname = mod->name;
3460 ret = get_ksymbol(mod, addr, size, offset);
3461 break;
3462 }
3463 }
3464 /* Make a copy in here where it's safe */
3465 if (ret) {
3466 strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
3467 ret = namebuf;
3468 }
3469 preempt_enable();
3470 return ret;
3471 }
3472
3473 int lookup_module_symbol_name(unsigned long addr, char *symname)
3474 {
3475 struct module *mod;
3476
3477 preempt_disable();
3478 list_for_each_entry_rcu(mod, &modules, list) {
3479 if (mod->state == MODULE_STATE_UNFORMED)
3480 continue;
3481 if (within_module_init(addr, mod) ||
3482 within_module_core(addr, mod)) {
3483 const char *sym;
3484
3485 sym = get_ksymbol(mod, addr, NULL, NULL);
3486 if (!sym)
3487 goto out;
3488 strlcpy(symname, sym, KSYM_NAME_LEN);
3489 preempt_enable();
3490 return 0;
3491 }
3492 }
3493 out:
3494 preempt_enable();
3495 return -ERANGE;
3496 }
3497
3498 int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
3499 unsigned long *offset, char *modname, char *name)
3500 {
3501 struct module *mod;
3502
3503 preempt_disable();
3504 list_for_each_entry_rcu(mod, &modules, list) {
3505 if (mod->state == MODULE_STATE_UNFORMED)
3506 continue;
3507 if (within_module_init(addr, mod) ||
3508 within_module_core(addr, mod)) {
3509 const char *sym;
3510
3511 sym = get_ksymbol(mod, addr, size, offset);
3512 if (!sym)
3513 goto out;
3514 if (modname)
3515 strlcpy(modname, mod->name, MODULE_NAME_LEN);
3516 if (name)
3517 strlcpy(name, sym, KSYM_NAME_LEN);
3518 preempt_enable();
3519 return 0;
3520 }
3521 }
3522 out:
3523 preempt_enable();
3524 return -ERANGE;
3525 }
3526
3527 int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3528 char *name, char *module_name, int *exported)
3529 {
3530 struct module *mod;
3531
3532 preempt_disable();
3533 list_for_each_entry_rcu(mod, &modules, list) {
3534 if (mod->state == MODULE_STATE_UNFORMED)
3535 continue;
3536 if (symnum < mod->num_symtab) {
3537 *value = mod->symtab[symnum].st_value;
3538 *type = mod->symtab[symnum].st_info;
3539 strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
3540 KSYM_NAME_LEN);
3541 strlcpy(module_name, mod->name, MODULE_NAME_LEN);
3542 *exported = is_exported(name, *value, mod);
3543 preempt_enable();
3544 return 0;
3545 }
3546 symnum -= mod->num_symtab;
3547 }
3548 preempt_enable();
3549 return -ERANGE;
3550 }
3551
3552 static unsigned long mod_find_symname(struct module *mod, const char *name)
3553 {
3554 unsigned int i;
3555
3556 for (i = 0; i < mod->num_symtab; i++)
3557 if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
3558 mod->symtab[i].st_info != 'U')
3559 return mod->symtab[i].st_value;
3560 return 0;
3561 }
3562
3563 /* Look for this name: can be of form module:name. */
3564 unsigned long module_kallsyms_lookup_name(const char *name)
3565 {
3566 struct module *mod;
3567 char *colon;
3568 unsigned long ret = 0;
3569
3570 /* Don't lock: we're in enough trouble already. */
3571 preempt_disable();
3572 if ((colon = strchr(name, ':')) != NULL) {
3573 *colon = '\0';
3574 if ((mod = find_module(name)) != NULL)
3575 ret = mod_find_symname(mod, colon+1);
3576 *colon = ':';
3577 } else {
3578 list_for_each_entry_rcu(mod, &modules, list) {
3579 if (mod->state == MODULE_STATE_UNFORMED)
3580 continue;
3581 if ((ret = mod_find_symname(mod, name)) != 0)
3582 break;
3583 }
3584 }
3585 preempt_enable();
3586 return ret;
3587 }
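/*
 * Illustrative use (module and symbol names made up):
 *
 *	addr = module_kallsyms_lookup_name("ext4:ext4_mount");
 *
 * searches only ext4's symbol table, while a bare "ext4_mount" walks
 * every module on the list (skipping unformed ones) until a match is
 * found, returning 0 if there is none.
 */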
3588
3589 int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3590 struct module *, unsigned long),
3591 void *data)
3592 {
3593 struct module *mod;
3594 unsigned int i;
3595 int ret;
3596
3597 list_for_each_entry(mod, &modules, list) {
3598 if (mod->state == MODULE_STATE_UNFORMED)
3599 continue;
3600 for (i = 0; i < mod->num_symtab; i++) {
3601 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3602 mod, mod->symtab[i].st_value);
3603 if (ret != 0)
3604 return ret;
3605 }
3606 }
3607 return 0;
3608 }
3609 #endif /* CONFIG_KALLSYMS */
3610
3611 static char *module_flags(struct module *mod, char *buf)
3612 {
3613 int bx = 0;
3614
3615 BUG_ON(mod->state == MODULE_STATE_UNFORMED);
3616 if (mod->taints ||
3617 mod->state == MODULE_STATE_GOING ||
3618 mod->state == MODULE_STATE_COMING) {
3619 buf[bx++] = '(';
3620 bx += module_flags_taint(mod, buf + bx);
3621 /* Show a - for module-is-being-unloaded */
3622 if (mod->state == MODULE_STATE_GOING)
3623 buf[bx++] = '-';
3624 /* Show a + for module-is-being-loaded */
3625 if (mod->state == MODULE_STATE_COMING)
3626 buf[bx++] = '+';
3627 buf[bx++] = ')';
3628 }
3629 buf[bx] = '\0';
3630
3631 return buf;
3632 }
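/*
 * Illustrative outputs: a proprietary module still loading might yield
 * "(P+)", a force-loaded one being removed "(F-)", and a live,
 * untainted module the empty string.
 */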
3633
3634 #ifdef CONFIG_PROC_FS
3635 /* Called by the /proc file system to return a list of modules. */
3636 static void *m_start(struct seq_file *m, loff_t *pos)
3637 {
3638 mutex_lock(&module_mutex);
3639 return seq_list_start(&modules, *pos);
3640 }
3641
3642 static void *m_next(struct seq_file *m, void *p, loff_t *pos)
3643 {
3644 return seq_list_next(p, &modules, pos);
3645 }
3646
3647 static void m_stop(struct seq_file *m, void *p)
3648 {
3649 mutex_unlock(&module_mutex);
3650 }
3651
3652 static int m_show(struct seq_file *m, void *p)
3653 {
3654 struct module *mod = list_entry(p, struct module, list);
3655 char buf[8];
3656
3657 /* We always ignore unformed modules. */
3658 if (mod->state == MODULE_STATE_UNFORMED)
3659 return 0;
3660
3661 seq_printf(m, "%s %u",
3662 mod->name, mod->init_size + mod->core_size);
3663 print_unload_info(m, mod);
3664
3665 /* Informative for users. */
3666 seq_printf(m, " %s",
3667 mod->state == MODULE_STATE_GOING ? "Unloading":
3668 mod->state == MODULE_STATE_COMING ? "Loading":
3669 "Live");
3670 /* Used by oprofile and other similar tools. */
3671 seq_printf(m, " 0x%pK", mod->module_core);
3672
3673 /* Taints info */
3674 if (mod->taints)
3675 seq_printf(m, " %s", module_flags(mod, buf));
3676
3677 seq_printf(m, "\n");
3678 return 0;
3679 }
3680
3681 /* Format: modulename size refcount deps address
3682
3683 Where refcount is a number or -, and deps is a comma-separated list
3684 of depends or -.
3685 */
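/*
 * e.g. (illustrative values):
 *
 *	ext4 550000 2 mbcache,jbd2, Live 0xffffffffa02d4000
 */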
3686 static const struct seq_operations modules_op = {
3687 .start = m_start,
3688 .next = m_next,
3689 .stop = m_stop,
3690 .show = m_show
3691 };
3692
3693 static int modules_open(struct inode *inode, struct file *file)
3694 {
3695 return seq_open(file, &modules_op);
3696 }
3697
3698 static const struct file_operations proc_modules_operations = {
3699 .open = modules_open,
3700 .read = seq_read,
3701 .llseek = seq_lseek,
3702 .release = seq_release,
3703 };
3704
3705 static int __init proc_modules_init(void)
3706 {
3707 proc_create("modules", 0, NULL, &proc_modules_operations);
3708 return 0;
3709 }
3710 module_init(proc_modules_init);
3711 #endif
3712
3713 /* Given an address, look for it in the module exception tables. */
3714 const struct exception_table_entry *search_module_extables(unsigned long addr)
3715 {
3716 const struct exception_table_entry *e = NULL;
3717 struct module *mod;
3718
3719 preempt_disable();
3720 list_for_each_entry_rcu(mod, &modules, list) {
3721 if (mod->state == MODULE_STATE_UNFORMED)
3722 continue;
3723 if (mod->num_exentries == 0)
3724 continue;
3725
3726 e = search_extable(mod->extable,
3727 mod->extable + mod->num_exentries - 1,
3728 addr);
3729 if (e)
3730 break;
3731 }
3732 preempt_enable();
3733
3734 /* If we found one, we are currently running inside it, so the
3735 module cannot be unloaded and no refcnt is needed. */
3736 return e;
3737 }
3738
3739 /*
3740 * is_module_address - is this address inside a module?
3741 * @addr: the address to check.
3742 *
3743 * See is_module_text_address() if you simply want to see if the address
3744 * is code (not data).
3745 */
3746 bool is_module_address(unsigned long addr)
3747 {
3748 bool ret;
3749
3750 preempt_disable();
3751 ret = __module_address(addr) != NULL;
3752 preempt_enable();
3753
3754 return ret;
3755 }
3756
3757 /*
3758 * __module_address - get the module which contains an address.
3759 * @addr: the address.
3760 *
3761 * Must be called with preempt disabled or module mutex held so that
3762 * module doesn't get freed during this.
3763 */
3764 struct module *__module_address(unsigned long addr)
3765 {
3766 struct module *mod;
3767
3768 if (addr < module_addr_min || addr > module_addr_max)
3769 return NULL;
3770
3771 list_for_each_entry_rcu(mod, &modules, list) {
3772 if (mod->state == MODULE_STATE_UNFORMED)
3773 continue;
3774 if (within_module_core(addr, mod)
3775 || within_module_init(addr, mod))
3776 return mod;
3777 }
3778 return NULL;
3779 }
3780 EXPORT_SYMBOL_GPL(__module_address);
3781
3782 /*
3783 * is_module_text_address - is this address inside module code?
3784 * @addr: the address to check.
3785 *
3786 * See is_module_address() if you simply want to see if the address is
3787 * anywhere in a module. See kernel_text_address() for testing if an
3788 * address corresponds to kernel or module code.
3789 */
3790 bool is_module_text_address(unsigned long addr)
3791 {
3792 bool ret;
3793
3794 preempt_disable();
3795 ret = __module_text_address(addr) != NULL;
3796 preempt_enable();
3797
3798 return ret;
3799 }
3800
3801 /*
3802 * __module_text_address - get the module whose code contains an address.
3803 * @addr: the address.
3804 *
3805 * Must be called with preempt disabled or module mutex held so that
3806 * module doesn't get freed during this.
3807 */
3808 struct module *__module_text_address(unsigned long addr)
3809 {
3810 struct module *mod = __module_address(addr);
3811 if (mod) {
3812 /* Make sure it's within the text section. */
3813 if (!within(addr, mod->module_init, mod->init_text_size)
3814 && !within(addr, mod->module_core, mod->core_text_size))
3815 mod = NULL;
3816 }
3817 return mod;
3818 }
3819 EXPORT_SYMBOL_GPL(__module_text_address);
3820
3821 /* Don't grab lock, we're oopsing. */
3822 void print_modules(void)
3823 {
3824 struct module *mod;
3825 char buf[8];
3826
3827 printk(KERN_DEFAULT "Modules linked in:");
3828 /* Most callers should already have preempt disabled, but make sure */
3829 preempt_disable();
3830 list_for_each_entry_rcu(mod, &modules, list) {
3831 if (mod->state == MODULE_STATE_UNFORMED)
3832 continue;
3833 printk(" %s %p %s", mod->name, mod->module_core, module_flags(mod, buf));
3834 }
3835 preempt_enable();
3836 if (last_unloaded_module[0])
3837 printk(" [last unloaded: %s]", last_unloaded_module);
3838 printk("\n");
3839 }
3840
3841 #ifdef CONFIG_MODVERSIONS
3842 /* Generate the signature for all relevant module structures here.
3843 * If these change, we don't want to try to parse the module. */
3844 void module_layout(struct module *mod,
3845 struct modversion_info *ver,
3846 struct kernel_param *kp,
3847 struct kernel_symbol *ks,
3848 struct tracepoint * const *tp)
3849 {
3850 }
3851 EXPORT_SYMBOL(module_layout);
3852 #endif