/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/ftrace_event.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
 */
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif

/*
 * Given BASE and SIZE this macro calculates the number of pages the
 * memory region occupies
 */
#define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ?		\
		(PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) -	\
			 PFN_DOWN((unsigned long)BASE) + 1)	\
		: (0UL))
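
/*
 * Illustrative example (editorial sketch, not from the original source):
 * with 4 KiB pages, a region at BASE = 0x1ff0 with SIZE = 0x20 straddles
 * a page boundary, so
 *	MOD_NUMBER_OF_PAGES(0x1ff0, 0x20)
 *		= PFN_DOWN(0x200f) - PFN_DOWN(0x1ff0) + 1 = 2 - 1 + 1 = 2,
 * while SIZE = 0 yields 0 pages.
 */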

/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete and add use RCU list operations). */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */

#ifdef CONFIG_MODULE_SIG
#ifdef CONFIG_MODULE_SIG_FORCE
static bool sig_enforce = true;
#else
static bool sig_enforce = false;

static int param_set_bool_enable_only(const char *val,
				      const struct kernel_param *kp)
{
	int err;
	bool test;
	struct kernel_param dummy_kp = *kp;

	dummy_kp.arg = &test;

	err = param_set_bool(val, &dummy_kp);
	if (err)
		return err;

	/* Don't let them unset it once it's set! */
	if (!test && sig_enforce)
		return -EROFS;

	if (test)
		sig_enforce = true;
	return 0;
}

static const struct kernel_param_ops param_ops_bool_enable_only = {
	.flags = KERNEL_PARAM_OPS_FL_NOARG,
	.set = param_set_bool_enable_only,
	.get = param_get_bool,
};
#define param_check_bool_enable_only param_check_bool

module_param(sig_enforce, bool_enable_only, 0644);
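
/*
 * Usage sketch (editorial, assuming CONFIG_MODULE_SIG=y without
 * CONFIG_MODULE_SIG_FORCE): enforcement can be switched on -- but never
 * back off -- at boot or at runtime, e.g.:
 *
 *	module.sig_enforce=1				(kernel command line)
 *	echo 1 > /sys/module/module/parameters/sig_enforce	(as root)
 *
 * Writing 0 after it has been set returns -EROFS, as implemented above.
 */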
#endif /* !CONFIG_MODULE_SIG_FORCE */
#endif /* CONFIG_MODULE_SIG */

/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);
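
/*
 * Usage sketch (editorial): "bint" accepts a bare flag, so booting with
 * just "nomodule" on the kernel command line sets modules_disabled, after
 * which module load and unload requests fail with -EPERM.
 */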

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

/* Bounds of module allocation, for speeding __module_address.
 * Protected by module_mutex. */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);
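
/*
 * Usage sketch (editorial; my_module_event and my_nb are hypothetical):
 * a subsystem that wants to hear about module state transitions
 * (MODULE_STATE_COMING/LIVE/GOING) hangs a notifier block on this chain:
 *
 *	static int my_module_event(struct notifier_block *nb,
 *				   unsigned long state, void *data)
 *	{
 *		struct module *mod = data;
 *		if (state == MODULE_STATE_GOING)
 *			pr_info("%s is unloading\n", mod->name);
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_module_event,
 *	};
 *	register_module_notifier(&my_nb);
 */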

struct load_info {
	Elf_Ehdr *hdr;
	unsigned long len;
	Elf_Shdr *sechdrs;
	char *secstrings, *strtab;
	unsigned long symoffs, stroffs;
	struct _ddebug *debug;
	unsigned int num_debug;
	bool sig_ok;
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;
};

/* We require a truly strong try_module_get(): 0 means failure due to
   ongoing or failed initialization etc. */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{
	add_taint(flag, lockdep_ok);
	mod->taints |= (1U << flag);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);
	do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
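
/*
 * Example (editorial sketch): later in loading, this helper pulls typed
 * arrays out of named ELF sections; the module's parameters, for
 * instance, could be located with something like
 *
 *	mod->kp = section_objs(info, "__param",
 *			       sizeof(*mod->kp), &mod->num_kp);
 *
 * which is roughly how find_module_sections() uses it.
 */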

/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab[];
extern const unsigned long __start___kcrctab_gpl[];
extern const unsigned long __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      void *data),
				   void *data)
{
	unsigned int j;

	for (j = 0; j < arrsize; j++) {
		if (fn(&arr[j], owner, data))
			return true;
	}

	return false;
}

/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
				    struct module *owner,
				    void *data),
			 void *data)
{
	struct module *mod;
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif
	};

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	list_for_each_entry_rcu(mod, &modules, list) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif
		};

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(each_symbol_section);

struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};

static bool check_symbol(const struct symsearch *syms,
			 struct module *owner,
			 unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			pr_warn("Symbol %s is being used by a non-GPL module, "
				"which will not be allowed in the future\n",
				fsa->name);
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		pr_warn("Symbol %s is marked as UNUSED, however this module is "
			"using it.\n", fsa->name);
		pr_warn("This symbol will go away in the future.\n");
		pr_warn("Please evaluate if this is the right api to use and "
			"if it really is, submit a report to the linux kernel "
			"mailing list together with submitting your code for "
			"inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}

static int cmp_name(const void *va, const void *vb)
{
	const char *a;
	const struct kernel_symbol *b;
	a = va; b = vb;
	return strcmp(a, b->name);
}

static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   void *data)
{
	struct find_symbol_arg *fsa = data;
	struct kernel_symbol *sym;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
		      sizeof(struct kernel_symbol), cmp_name);

	if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
		return true;

	return false;
}

/* Find a symbol and return it, along with (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const unsigned long **crc,
					bool gplok,
					bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol_section(find_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	pr_debug("Failed to find symbol %s\n", name);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);

/* Search for module by name: must hold module_mutex. */
static struct module *find_module_all(const char *name, size_t len,
				      bool even_unformed)
{
	struct module *mod;

	list_for_each_entry(mod, &modules, list) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
			return mod;
	}
	return NULL;
}

struct module *find_module(const char *name)
{
	return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)
		return 0;

	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);

			if ((void *)addr >= start &&
			    (void *)addr < start + mod->percpu_size) {
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,  \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
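
/*
 * Note (editorial): each MODINFO_ATTR(field) above materializes as a
 * read-only sysfs file, e.g. /sys/module/<name>/version and
 * /sys/module/<name>/srcversion, backed by the corresponding strings
 * recorded in the module's .modinfo section.
 */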

static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count held by the module loader. */
#define MODULE_REF_BASE	1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	/*
	 * Initialize reference counter to MODULE_REF_BASE.
	 * refcnt == 0 means module is going.
	 */
	atomic_set(&mod->refcnt, MODULE_REF_BASE);

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	atomic_inc(&mod->refcnt);

	return 0;
}

/* Does 'a' already use 'b'? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a) {
			pr_debug("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		pr_warn("%s: out of memory loading\n", a->name);
		return -ENOMEM;
	}

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}

/* Module a uses b: caller must hold module_mutex */
int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ref_module);

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
	int ret;

	/* Try to decrement refcnt which we set at loading */
	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
	BUG_ON(ret < 0);
	if (ret)
		/* Someone can put this right now, recover with checking */
		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

	return ret;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	/* If it's not unused, quit unless we're forcing. */
	if (try_release_module_ref(mod) != 0) {
		*forced = try_force_unload(flags);
		if (!(*forced))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	mod->state = MODULE_STATE_GOING;

	return 0;
}

/**
 * module_refcount - return the refcount or -1 if unloading
 *
 * @mod:	the module we're checking
 *
 * Returns:
 *	-1 if the module is in the process of unloading
 *	otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}
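
/*
 * Example (editorial): from userspace this syscall is typically reached
 * via rmmod, which is roughly equivalent to
 *
 *	syscall(__NR_delete_module, "mymod", O_NONBLOCK);
 *
 * where "mymod" is a placeholder name; adding O_TRUNC to the flags
 * selects the force path in try_force_unload() above.
 */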

static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	struct module_use *use;
	int printed_something = 0;

	seq_printf(m, " %i ", module_refcount(mod));

	/*
	 * Always include a trailing comma, so userspace can differentiate
	 * between this and the old multi-field proc format.
	 */
	list_for_each_entry(use, &mod->source_list, source_list) {
		printed_something = 1;
		seq_printf(m, "%s,", use->source->name);
	}

	if (mod->init != NULL && mod->exit == NULL) {
		printed_something = 1;
		seq_puts(m, "[permanent],");
	}

	if (!printed_something)
		seq_puts(m, "-");
}

void __symbol_put(const char *symbol)
{
	struct module *owner;

	preempt_disable();
	if (!find_symbol(symbol, &owner, NULL, true, false))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/* module_text_address is safe here: we're supposed to have reference
	 * to module from symbol_get, so it can't go away. */
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}

static struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
	if (module) {
		preempt_disable();
		atomic_inc(&module->refcnt);
		trace_module_get(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		preempt_disable();
		/* Note: here, we can fail to get a reference */
		if (likely(module_is_live(module) &&
			   atomic_inc_not_zero(&module->refcnt) != 0))
			trace_module_get(module, _RET_IP_);
		else
			ret = false;

		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);

void module_put(struct module *module)
{
	int ret;

	if (module) {
		preempt_disable();
		ret = atomic_dec_if_positive(&module->refcnt);
		WARN_ON(ret < 0);	/* Failed to put refcount */
		trace_module_put(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);
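
/*
 * Usage sketch (editorial; owner_op() is a hypothetical callback): the
 * canonical pattern is to pin the owning module for the duration of a
 * call into its code, and drop the reference afterwards:
 *
 *	if (try_module_get(owner)) {
 *		ret = owner_op();
 *		module_put(owner);
 *	} else {
 *		ret = -ENODEV;	the module is going away
 *	}
 */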

#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_puts(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(ref_module);

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */

static size_t module_flags_taint(struct module *mod, char *buf)
{
	size_t l = 0;

	if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
		buf[l++] = 'P';
	if (mod->taints & (1 << TAINT_OOT_MODULE))
		buf[l++] = 'O';
	if (mod->taints & (1 << TAINT_FORCED_MODULE))
		buf[l++] = 'F';
	if (mod->taints & (1 << TAINT_CRAP))
		buf[l++] = 'C';
	if (mod->taints & (1 << TAINT_UNSIGNED_MODULE))
		buf[l++] = 'E';
	/*
	 * TAINT_FORCED_RMMOD: could be added.
	 * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
	 * apply to modules.
	 */
	return l;
}

static ssize_t show_initstate(struct module_attribute *mattr,
			      struct module_kobject *mk, char *buffer)
{
	const char *state = "unknown";

	switch (mk->mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	default:
		BUG();
	}
	return sprintf(buffer, "%s\n", state);
}

static struct module_attribute modinfo_initstate =
	__ATTR(initstate, 0444, show_initstate, NULL);

static ssize_t store_uevent(struct module_attribute *mattr,
			    struct module_kobject *mk,
			    const char *buffer, size_t count)
{
	enum kobject_action action;

	if (kobject_action_type(buffer, count, &action) == 0)
		kobject_uevent(&mk->kobj, action);
	return count;
}

struct module_attribute module_uevent =
	__ATTR(uevent, 0200, NULL, store_uevent);

static ssize_t show_coresize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->core_size);
}

static struct module_attribute modinfo_coresize =
	__ATTR(coresize, 0444, show_coresize, NULL);

static ssize_t show_initsize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->init_size);
}

static struct module_attribute modinfo_initsize =
	__ATTR(initsize, 0444, show_initsize, NULL);

static ssize_t show_taint(struct module_attribute *mattr,
			  struct module_kobject *mk, char *buffer)
{
	size_t l;

	l = module_flags_taint(mk->mod, buffer);
	buffer[l++] = '\n';
	return l;
}

static struct module_attribute modinfo_taint =
	__ATTR(taint, 0444, show_taint, NULL);

static struct module_attribute *modinfo_attrs[] = {
	&module_uevent,
	&modinfo_version,
	&modinfo_srcversion,
	&modinfo_initstate,
	&modinfo_coresize,
	&modinfo_initsize,
	&modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
	&modinfo_refcnt,
#endif
	NULL,
};

static const char vermagic[] = VERMAGIC_STRING;

static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
	return 0;
#else
	return -ENOEXEC;
#endif
}

#ifdef CONFIG_MODVERSIONS
/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
static unsigned long maybe_relocated(unsigned long crc,
				     const struct module *crc_owner)
{
#ifdef ARCH_RELOCATES_KCRCTAB
	if (crc_owner == NULL)
		return crc - (unsigned long)reloc_start;
#endif
	return crc;
}

static int check_version(Elf_Shdr *sechdrs,
			 unsigned int versindex,
			 const char *symname,
			 struct module *mod,
			 const unsigned long *crc,
			 const struct module *crc_owner)
{
	unsigned int i, num_versions;
	struct modversion_info *versions;

	/* Exporting module didn't supply crcs?  OK, we're already tainted. */
	if (!crc)
		return 1;

	/* No versions at all?  modprobe --force does this. */
	if (versindex == 0)
		return try_to_force_load(mod, symname) == 0;

	versions = (void *) sechdrs[versindex].sh_addr;
	num_versions = sechdrs[versindex].sh_size
		/ sizeof(struct modversion_info);

	for (i = 0; i < num_versions; i++) {
		if (strcmp(versions[i].name, symname) != 0)
			continue;

		if (versions[i].crc == maybe_relocated(*crc, crc_owner))
			return 1;
		pr_debug("Found checksum %lX vs module %lX\n",
			 maybe_relocated(*crc, crc_owner), versions[i].crc);
		goto bad_version;
	}

	pr_warn("%s: no symbol version for %s\n", mod->name, symname);
	return 0;

bad_version:
	pr_warn("%s: disagrees about version of symbol %s\n",
		mod->name, symname);
	return 0;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	const unsigned long *crc;

	/* Since this should be found in kernel (which can't be removed),
	 * no locking is necessary. */
	if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
			 &crc, true, false))
		BUG();
	return check_version(sechdrs, versindex,
			     VMLINUX_SYMBOL_STR(module_layout), mod, crc,
			     NULL);
}

/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	if (has_crcs) {
		amagic += strcspn(amagic, " ");
		bmagic += strcspn(bmagic, " ");
	}
	return strcmp(amagic, bmagic) == 0;
}
#else
static inline int check_version(Elf_Shdr *sechdrs,
				unsigned int versindex,
				const char *symname,
				struct module *mod,
				const unsigned long *crc,
				const struct module *crc_owner)
{
	return 1;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */
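
/*
 * Example (editorial; the string is illustrative): a vermagic string
 * looks like "4.0.0 SMP mod_unload modversions", where the first token
 * is the kernel release. With per-symbol CRCs (CONFIG_MODVERSIONS) that
 * token is skipped by same_magic() above, so only the feature flags must
 * match; without CRCs the whole string is compared.
 */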

/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
						  const char *name,
						  char ownername[])
{
	struct module *owner;
	const struct kernel_symbol *sym;
	const unsigned long *crc;
	int err;

	/*
	 * The module_mutex should not be a heavily contended lock;
	 * if we get the occasional sleep here, we'll go an extra iteration
	 * in the wait_event_interruptible(), which is harmless.
	 */
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	sym = find_symbol(name, &owner, &crc,
			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
	if (!sym)
		goto unlock;

	if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
			   owner)) {
		sym = ERR_PTR(-EINVAL);
		goto getname;
	}

	err = ref_module(mod, owner);
	if (err) {
		sym = ERR_PTR(err);
		goto getname;
	}

getname:
	/* We must make copy under the lock if we failed to get ref. */
	strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
unlock:
	mutex_unlock(&module_mutex);
	return sym;
}

static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
		    const char *name)
{
	const struct kernel_symbol *ksym;
	char owner[MODULE_NAME_LEN];

	if (wait_event_interruptible_timeout(module_wq,
			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
			|| PTR_ERR(ksym) != -EBUSY,
					     30 * HZ) <= 0) {
		pr_warn("%s: gave up waiting for init of module %s.\n",
			mod->name, owner);
	}
	return ksym;
}

/*
 * /sys/module/foo/sections stuff
 * J. Corbet <corbet@lwn.net>
 */
#ifdef CONFIG_SYSFS

#ifdef CONFIG_KALLSYMS
static inline bool sect_empty(const Elf_Shdr *sect)
{
	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}

struct module_sect_attr {
	struct module_attribute mattr;
	char *name;
	unsigned long address;
};

struct module_sect_attrs {
	struct attribute_group grp;
	unsigned int nsections;
	struct module_sect_attr attrs[0];
};

static ssize_t module_sect_show(struct module_attribute *mattr,
				struct module_kobject *mk, char *buf)
{
	struct module_sect_attr *sattr =
		container_of(mattr, struct module_sect_attr, mattr);
	return sprintf(buf, "0x%pK\n", (void *)sattr->address);
}

static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
{
	unsigned int section;

	for (section = 0; section < sect_attrs->nsections; section++)
		kfree(sect_attrs->attrs[section].name);
	kfree(sect_attrs);
}

static void add_sect_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int nloaded = 0, i, size[2];
	struct module_sect_attrs *sect_attrs;
	struct module_sect_attr *sattr;
	struct attribute **gattr;

	/* Count loaded sections and allocate structures */
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]))
			nloaded++;
	size[0] = ALIGN(sizeof(*sect_attrs)
			+ nloaded * sizeof(sect_attrs->attrs[0]),
			sizeof(sect_attrs->grp.attrs[0]));
	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
	if (sect_attrs == NULL)
		return;

	/* Setup section attributes. */
	sect_attrs->grp.name = "sections";
	sect_attrs->grp.attrs = (void *)sect_attrs + size[0];

	sect_attrs->nsections = 0;
	sattr = &sect_attrs->attrs[0];
	gattr = &sect_attrs->grp.attrs[0];
	for (i = 0; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *sec = &info->sechdrs[i];
		if (sect_empty(sec))
			continue;
		sattr->address = sec->sh_addr;
		sattr->name = kstrdup(info->secstrings + sec->sh_name,
				      GFP_KERNEL);
		if (sattr->name == NULL)
			goto out;
		sect_attrs->nsections++;
		sysfs_attr_init(&sattr->mattr.attr);
		sattr->mattr.show = module_sect_show;
		sattr->mattr.store = NULL;
		sattr->mattr.attr.name = sattr->name;
		sattr->mattr.attr.mode = S_IRUGO;
		*(gattr++) = &(sattr++)->mattr.attr;
	}
	*gattr = NULL;

	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
		goto out;

	mod->sect_attrs = sect_attrs;
	return;
out:
	free_sect_attrs(sect_attrs);
}

static void remove_sect_attrs(struct module *mod)
{
	if (mod->sect_attrs) {
		sysfs_remove_group(&mod->mkobj.kobj,
				   &mod->sect_attrs->grp);
		/* We are positive that no one is using any sect attrs
		 * at this point.  Deallocate immediately. */
		free_sect_attrs(mod->sect_attrs);
		mod->sect_attrs = NULL;
	}
}

/*
 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
 */

struct module_notes_attrs {
	struct kobject *dir;
	unsigned int notes;
	struct bin_attribute attrs[0];
};

static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t pos, size_t count)
{
	/*
	 * The caller checked the pos and count against our size.
	 */
	memcpy(buf, bin_attr->private + pos, count);
	return count;
}

static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
			     unsigned int i)
{
	if (notes_attrs->dir) {
		while (i-- > 0)
			sysfs_remove_bin_file(notes_attrs->dir,
					      &notes_attrs->attrs[i]);
		kobject_put(notes_attrs->dir);
	}
	kfree(notes_attrs);
}

static void add_notes_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int notes, loaded, i;
	struct module_notes_attrs *notes_attrs;
	struct bin_attribute *nattr;

	/* failed to create section attributes, so can't create notes */
	if (!mod->sect_attrs)
		return;

	/* Count notes sections and allocate structures.  */
	notes = 0;
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]) &&
		    (info->sechdrs[i].sh_type == SHT_NOTE))
			++notes;

	if (notes == 0)
		return;

	notes_attrs = kzalloc(sizeof(*notes_attrs)
			      + notes * sizeof(notes_attrs->attrs[0]),
			      GFP_KERNEL);
	if (notes_attrs == NULL)
		return;

	notes_attrs->notes = notes;
	nattr = &notes_attrs->attrs[0];
	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
		if (sect_empty(&info->sechdrs[i]))
			continue;
		if (info->sechdrs[i].sh_type == SHT_NOTE) {
			sysfs_bin_attr_init(nattr);
			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
			nattr->attr.mode = S_IRUGO;
			nattr->size = info->sechdrs[i].sh_size;
			nattr->private = (void *) info->sechdrs[i].sh_addr;
			nattr->read = module_notes_read;
			++nattr;
		}
		++loaded;
	}

	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
	if (!notes_attrs->dir)
		goto out;

	for (i = 0; i < notes; ++i)
		if (sysfs_create_bin_file(notes_attrs->dir,
					  &notes_attrs->attrs[i]))
			goto out;

	mod->notes_attrs = notes_attrs;
	return;

out:
	free_notes_attrs(notes_attrs, i);
}

static void remove_notes_attrs(struct module *mod)
{
	if (mod->notes_attrs)
		free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
}

#else

static inline void add_sect_attrs(struct module *mod,
				  const struct load_info *info)
{
}

static inline void remove_sect_attrs(struct module *mod)
{
}

static inline void add_notes_attrs(struct module *mod,
				   const struct load_info *info)
{
}

static inline void remove_notes_attrs(struct module *mod)
{
}
#endif /* CONFIG_KALLSYMS */

static void add_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;
	int nowarn;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list) {
		nowarn = sysfs_create_link(use->target->holders_dir,
					   &mod->mkobj.kobj, mod->name);
	}
	mutex_unlock(&module_mutex);
#endif
}

static void del_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list)
		sysfs_remove_link(use->target->holders_dir, mod->name);
	mutex_unlock(&module_mutex);
#endif
}

static int module_add_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	struct module_attribute *temp_attr;
	int error = 0;
	int i;

	mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
					(ARRAY_SIZE(modinfo_attrs) + 1)),
					GFP_KERNEL);
	if (!mod->modinfo_attrs)
		return -ENOMEM;

	temp_attr = mod->modinfo_attrs;
	for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
		if (!attr->test ||
		    (attr->test && attr->test(mod))) {
			memcpy(temp_attr, attr, sizeof(*temp_attr));
			sysfs_attr_init(&temp_attr->attr);
			error = sysfs_create_file(&mod->mkobj.kobj,
						  &temp_attr->attr);
			++temp_attr;
		}
	}
	return error;
}

static void module_remove_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
		/* pick a field to test for end of list */
		if (!attr->attr.name)
			break;
		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
		if (attr->free)
			attr->free(mod);
	}
	kfree(mod->modinfo_attrs);
}

static void mod_kobject_put(struct module *mod)
{
	DECLARE_COMPLETION_ONSTACK(c);
	mod->mkobj.kobj_completion = &c;
	kobject_put(&mod->mkobj.kobj);
	wait_for_completion(&c);
}

static int mod_sysfs_init(struct module *mod)
{
	int err;
	struct kobject *kobj;

	if (!module_sysfs_initialized) {
		pr_err("%s: module sysfs not initialized\n", mod->name);
		err = -EINVAL;
		goto out;
	}

	kobj = kset_find_obj(module_kset, mod->name);
	if (kobj) {
		pr_err("%s: module is already loaded\n", mod->name);
		kobject_put(kobj);
		err = -EINVAL;
		goto out;
	}

	mod->mkobj.mod = mod;

	memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
	mod->mkobj.kobj.kset = module_kset;
	err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
				   "%s", mod->name);
	if (err)
		mod_kobject_put(mod);

	/* delay uevent until full sysfs population */
out:
	return err;
}

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	int err;

	err = mod_sysfs_init(mod);
	if (err)
		goto out;

	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
	if (!mod->holders_dir) {
		err = -ENOMEM;
		goto out_unreg;
	}

	err = module_param_sysfs_setup(mod, kparam, num_params);
	if (err)
		goto out_unreg_holders;

	err = module_add_modinfo_attrs(mod);
	if (err)
		goto out_unreg_param;

	add_usage_links(mod);
	add_sect_attrs(mod, info);
	add_notes_attrs(mod, info);

	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
	return 0;

out_unreg_param:
	module_param_sysfs_remove(mod);
out_unreg_holders:
	kobject_put(mod->holders_dir);
out_unreg:
	mod_kobject_put(mod);
out:
	return err;
}

static void mod_sysfs_fini(struct module *mod)
{
	remove_notes_attrs(mod);
	remove_sect_attrs(mod);
	mod_kobject_put(mod);
}

#else /* !CONFIG_SYSFS */

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	return 0;
}

static void mod_sysfs_fini(struct module *mod)
{
}

static void module_remove_modinfo_attrs(struct module *mod)
{
}

static void del_usage_links(struct module *mod)
{
}

#endif /* CONFIG_SYSFS */

static void mod_sysfs_teardown(struct module *mod)
{
	del_usage_links(mod);
	module_remove_modinfo_attrs(mod);
	module_param_sysfs_remove(mod);
	kobject_put(mod->mkobj.drivers_dir);
	kobject_put(mod->holders_dir);
	mod_sysfs_fini(mod);
}

#ifdef CONFIG_DEBUG_SET_MODULE_RONX
/*
 * LKM RO/NX protection: protect module's text/ro-data
 * from modification and any data from execution.
 */
void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages))
{
	unsigned long begin_pfn = PFN_DOWN((unsigned long)start);
	unsigned long end_pfn = PFN_DOWN((unsigned long)end);

	if (end_pfn > begin_pfn)
		set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
}

static void set_section_ro_nx(void *base,
			      unsigned long text_size,
			      unsigned long ro_size,
			      unsigned long total_size)
{
	/* begin and end PFNs of the current subsection */
	unsigned long begin_pfn;
	unsigned long end_pfn;

	/*
	 * Set RO for module text and RO-data:
	 * - Always protect first page.
	 * - Do not protect last partial page.
	 */
	if (ro_size > 0)
		set_page_attributes(base, base + ro_size, set_memory_ro);

	/*
	 * Set NX permissions for module data:
	 * - Do not protect first partial page.
	 * - Always protect last page.
	 */
	if (total_size > text_size) {
		begin_pfn = PFN_UP((unsigned long)base + text_size);
		end_pfn = PFN_UP((unsigned long)base + total_size);
		if (end_pfn > begin_pfn)
			set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
	}
}

static void unset_module_core_ro_nx(struct module *mod)
{
	set_page_attributes(mod->module_core + mod->core_text_size,
		mod->module_core + mod->core_size,
		set_memory_x);
	set_page_attributes(mod->module_core,
		mod->module_core + mod->core_ro_size,
		set_memory_rw);
}

static void unset_module_init_ro_nx(struct module *mod)
{
	set_page_attributes(mod->module_init + mod->init_text_size,
		mod->module_init + mod->init_size,
		set_memory_x);
	set_page_attributes(mod->module_init,
		mod->module_init + mod->init_ro_size,
		set_memory_rw);
}

/* Iterate through all modules and set each module's text as RW */
void set_all_modules_text_rw(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if ((mod->module_core) && (mod->core_text_size)) {
			set_page_attributes(mod->module_core,
					    mod->module_core + mod->core_text_size,
					    set_memory_rw);
		}
		if ((mod->module_init) && (mod->init_text_size)) {
			set_page_attributes(mod->module_init,
					    mod->module_init + mod->init_text_size,
					    set_memory_rw);
		}
	}
	mutex_unlock(&module_mutex);
}

/* Iterate through all modules and set each module's text as RO */
void set_all_modules_text_ro(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if ((mod->module_core) && (mod->core_text_size)) {
			set_page_attributes(mod->module_core,
					    mod->module_core + mod->core_text_size,
					    set_memory_ro);
		}
		if ((mod->module_init) && (mod->init_text_size)) {
			set_page_attributes(mod->module_init,
					    mod->module_init + mod->init_text_size,
					    set_memory_ro);
		}
	}
	mutex_unlock(&module_mutex);
}
#else
static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
static void unset_module_core_ro_nx(struct module *mod) { }
static void unset_module_init_ro_nx(struct module *mod) { }
#endif

void __weak module_memfree(void *module_region)
{
	vfree(module_region);
}

void __weak module_arch_cleanup(struct module *mod)
{
}

void __weak module_arch_freeing_init(struct module *mod)
{
}

/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
	trace_module_free(mod);

	mod_sysfs_teardown(mod);

	/* We leave it in list to prevent duplicate loads, but make sure
	 * that no one uses it while it's being deconstructed. */
	mutex_lock(&module_mutex);
	mod->state = MODULE_STATE_UNFORMED;
	mutex_unlock(&module_mutex);

	/* Remove dynamic debug info */
	ddebug_remove_module(mod->name);

	/* Arch-specific cleanup. */
	module_arch_cleanup(mod);

	/* Module unload stuff */
	module_unload_free(mod);

	/* Free any allocated parameters. */
	destroy_params(mod->kp, mod->num_kp);

	/* Now we can delete it from the lists */
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	/* Remove this module from bug list, this uses list_del_rcu */
	module_bug_cleanup(mod);
	/* Wait for RCU synchronizing before releasing mod->list and buglist. */
	synchronize_rcu();
	mutex_unlock(&module_mutex);

	/* This may be NULL, but that's OK */
	unset_module_init_ro_nx(mod);
	module_arch_freeing_init(mod);
	module_memfree(mod->module_init);
	kfree(mod->args);
	percpu_modfree(mod);

	/* Free lock-classes: */
	lockdep_free_key_range(mod->module_core, mod->core_size);

	/* Finally, free the core (containing the module structure) */
	unset_module_core_ro_nx(mod);
	module_memfree(mod->module_core);

#ifdef CONFIG_MPU
	update_protections(current->mm);
#endif
}

void *__symbol_get(const char *symbol)
{
	struct module *owner;
	const struct kernel_symbol *sym;

	preempt_disable();
	sym = find_symbol(symbol, &owner, NULL, true, true);
	if (sym && strong_try_module_get(owner))
		sym = NULL;
	preempt_enable();

	return sym ? (void *)sym->value : NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);

/*
 * Ensure that an exported symbol [global namespace] does not already exist
 * in the kernel or in some other module's exported symbol table.
 *
 * You must hold the module_mutex.
 */
static int verify_export_symbols(struct module *mod)
{
	unsigned int i;
	struct module *owner;
	const struct kernel_symbol *s;
	struct {
		const struct kernel_symbol *sym;
		unsigned int num;
	} arr[] = {
		{ mod->syms, mod->num_syms },
		{ mod->gpl_syms, mod->num_gpl_syms },
		{ mod->gpl_future_syms, mod->num_gpl_future_syms },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ mod->unused_syms, mod->num_unused_syms },
		{ mod->unused_gpl_syms, mod->num_unused_gpl_syms },
#endif
	};

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
			if (find_symbol(s->name, &owner, NULL, true, false)) {
				pr_err("%s: exports duplicate symbol %s"
				       " (owned by %s)\n",
				       mod->name, s->name, module_name(owner));
				return -ENOEXEC;
			}
		}
	}
	return 0;
}

/* Change all symbols so that st_value encodes the pointer directly. */
static int simplify_symbols(struct module *mod, const struct load_info *info)
{
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
	Elf_Sym *sym = (void *)symsec->sh_addr;
	unsigned long secbase;
	unsigned int i;
	int ret = 0;
	const struct kernel_symbol *ksym;

	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
		const char *name = info->strtab + sym[i].st_name;

		switch (sym[i].st_shndx) {
		case SHN_COMMON:
			/* Ignore common symbols */
			if (!strncmp(name, "__gnu_lto", 9))
				break;

			/* We compiled with -fno-common.  These are not
			   supposed to happen.  */
			pr_debug("Common symbol: %s\n", name);
			pr_warn("%s: please compile with -fno-common\n",
				mod->name);
			ret = -ENOEXEC;
			break;

		case SHN_ABS:
			/* Don't need to do anything */
			pr_debug("Absolute symbol: 0x%08lx\n",
				 (long)sym[i].st_value);
			break;

		case SHN_UNDEF:
			ksym = resolve_symbol_wait(mod, info, name);
			/* Ok if resolved.  */
			if (ksym && !IS_ERR(ksym)) {
				sym[i].st_value = ksym->value;
				break;
			}

			/* Ok if weak.  */
			if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
				break;

			pr_warn("%s: Unknown symbol %s (err %li)\n",
				mod->name, name, PTR_ERR(ksym));
			ret = PTR_ERR(ksym) ?: -ENOENT;
			break;

		default:
			/* Divert to percpu allocation if a percpu var. */
			if (sym[i].st_shndx == info->index.pcpu)
				secbase = (unsigned long)mod_percpu(mod);
			else
				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
			sym[i].st_value += secbase;
			break;
		}
	}

	return ret;
}

static int apply_relocations(struct module *mod, const struct load_info *info)
{
	unsigned int i;
	int err = 0;

	/* Now do relocations. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		unsigned int infosec = info->sechdrs[i].sh_info;

		/* Not a valid relocation section? */
		if (infosec >= info->hdr->e_shnum)
			continue;

		/* Don't bother with non-allocated sections */
		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
			continue;

		if (info->sechdrs[i].sh_type == SHT_REL)
			err = apply_relocate(info->sechdrs, info->strtab,
					     info->index.sym, i, mod);
		else if (info->sechdrs[i].sh_type == SHT_RELA)
			err = apply_relocate_add(info->sechdrs, info->strtab,
						 info->index.sym, i, mod);
		if (err < 0)
			break;
	}
	return err;
}
2024
2025 /* Additional bytes needed by arch in front of individual sections */
2026 unsigned int __weak arch_mod_section_prepend(struct module *mod,
2027 unsigned int section)
2028 {
2029 /* default implementation just returns zero */
2030 return 0;
2031 }
2032
2033 /* Update size with this section: return offset. */
2034 static long get_offset(struct module *mod, unsigned int *size,
2035 Elf_Shdr *sechdr, unsigned int section)
2036 {
2037 long ret;
2038
2039 *size += arch_mod_section_prepend(mod, section);
2040 ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
2041 *size = ret + sechdr->sh_size;
2042 return ret;
2043 }
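
/*
 * Worked example (numbers illustrative): with *size == 10,
 * sh_addralign == 8, sh_size == 100 and the default (zero)
 * arch_mod_section_prepend(), this computes
 *
 *	ret   = ALIGN(10, 8) = 16;	section placed at offset 16
 *	*size = 16 + 100     = 116;	region grows to 116 bytes
 */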
2044
2045 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2046 might -- code, read-only data, read-write data, small data. Tally
2047 sizes, and place the offsets into sh_entsize fields: high bit means it
2048 belongs in init. */
2049 static void layout_sections(struct module *mod, struct load_info *info)
2050 {
2051 static unsigned long const masks[][2] = {
2052 /* NOTE: all executable code must be the first section
2053 * in this array; otherwise modify the text_size
2054 * finder in the two loops below */
2055 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
2056 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
2057 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
2058 { ARCH_SHF_SMALL | SHF_ALLOC, 0 }
2059 };
2060 unsigned int m, i;
2061
2062 for (i = 0; i < info->hdr->e_shnum; i++)
2063 info->sechdrs[i].sh_entsize = ~0UL;
2064
2065 pr_debug("Core section allocation order:\n");
2066 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2067 for (i = 0; i < info->hdr->e_shnum; ++i) {
2068 Elf_Shdr *s = &info->sechdrs[i];
2069 const char *sname = info->secstrings + s->sh_name;
2070
2071 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2072 || (s->sh_flags & masks[m][1])
2073 || s->sh_entsize != ~0UL
2074 || strstarts(sname, ".init"))
2075 continue;
2076 s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
2077 pr_debug("\t%s\n", sname);
2078 }
2079 switch (m) {
2080 case 0: /* executable */
2081 mod->core_size = debug_align(mod->core_size);
2082 mod->core_text_size = mod->core_size;
2083 break;
2084 case 1: /* RO: text and ro-data */
2085 mod->core_size = debug_align(mod->core_size);
2086 mod->core_ro_size = mod->core_size;
2087 break;
2088 case 3: /* whole core */
2089 mod->core_size = debug_align(mod->core_size);
2090 break;
2091 }
2092 }
2093
2094 pr_debug("Init section allocation order:\n");
2095 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2096 for (i = 0; i < info->hdr->e_shnum; ++i) {
2097 Elf_Shdr *s = &info->sechdrs[i];
2098 const char *sname = info->secstrings + s->sh_name;
2099
2100 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2101 || (s->sh_flags & masks[m][1])
2102 || s->sh_entsize != ~0UL
2103 || !strstarts(sname, ".init"))
2104 continue;
2105 s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
2106 | INIT_OFFSET_MASK);
2107 pr_debug("\t%s\n", sname);
2108 }
2109 switch (m) {
2110 case 0: /* executable */
2111 mod->init_size = debug_align(mod->init_size);
2112 mod->init_text_size = mod->init_size;
2113 break;
2114 case 1: /* RO: text and ro-data */
2115 mod->init_size = debug_align(mod->init_size);
2116 mod->init_ro_size = mod->init_size;
2117 break;
2118 case 3: /* whole init */
2119 mod->init_size = debug_align(mod->init_size);
2120 break;
2121 }
2122 }
2123 }
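
/*
 * Resulting core layout, sketched for a hypothetical module:
 *
 *	[.text][pad][.rodata][pad][.data .bss][small]
 *	            ^                ^                ^
 *	            core_text_size   core_ro_size     core_size
 *
 * where the debug_align() padding collapses to nothing unless
 * CONFIG_DEBUG_SET_MODULE_RONX=y.
 */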
2124
2125 static void set_license(struct module *mod, const char *license)
2126 {
2127 if (!license)
2128 license = "unspecified";
2129
2130 if (!license_is_gpl_compatible(license)) {
2131 if (!test_taint(TAINT_PROPRIETARY_MODULE))
2132 pr_warn("%s: module license '%s' taints kernel.\n",
2133 mod->name, license);
2134 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2135 LOCKDEP_NOW_UNRELIABLE);
2136 }
2137 }
2138
2139 /* Parse tag=value strings from .modinfo section */
2140 static char *next_string(char *string, unsigned long *secsize)
2141 {
2142 /* Skip non-zero chars */
2143 while (string[0]) {
2144 string++;
2145 if ((*secsize)-- <= 1)
2146 return NULL;
2147 }
2148
2149 /* Skip any zero padding. */
2150 while (!string[0]) {
2151 string++;
2152 if ((*secsize)-- <= 1)
2153 return NULL;
2154 }
2155 return string;
2156 }
2157
2158 static char *get_modinfo(struct load_info *info, const char *tag)
2159 {
2160 char *p;
2161 unsigned int taglen = strlen(tag);
2162 Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2163 unsigned long size = infosec->sh_size;
2164
2165 for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
2166 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2167 return p + taglen + 1;
2168 }
2169 return NULL;
2170 }
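
/*
 * Illustration (contents hypothetical): .modinfo is a run of
 * NUL-terminated "tag=value" strings, e.g.
 *
 *	"license=GPL\0intree=Y\0vermagic=3.19.0 SMP mod_unload\0"
 *
 * so get_modinfo(info, "license") returns a pointer to "GPL", with
 * next_string() stepping over each value and any NUL padding.
 */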
2171
2172 static void setup_modinfo(struct module *mod, struct load_info *info)
2173 {
2174 struct module_attribute *attr;
2175 int i;
2176
2177 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2178 if (attr->setup)
2179 attr->setup(mod, get_modinfo(info, attr->attr.name));
2180 }
2181 }
2182
2183 static void free_modinfo(struct module *mod)
2184 {
2185 struct module_attribute *attr;
2186 int i;
2187
2188 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2189 if (attr->free)
2190 attr->free(mod);
2191 }
2192 }
2193
2194 #ifdef CONFIG_KALLSYMS
2195
2196 /* lookup symbol in given range of kernel_symbols */
2197 static const struct kernel_symbol *lookup_symbol(const char *name,
2198 const struct kernel_symbol *start,
2199 const struct kernel_symbol *stop)
2200 {
2201 return bsearch(name, start, stop - start,
2202 sizeof(struct kernel_symbol), cmp_name);
2203 }
2204
2205 static int is_exported(const char *name, unsigned long value,
2206 const struct module *mod)
2207 {
2208 const struct kernel_symbol *ks;
2209 if (!mod)
2210 ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2211 else
2212 ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2213 return ks != NULL && ks->value == value;
2214 }
2215
2216 /* As per nm */
2217 static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2218 {
2219 const Elf_Shdr *sechdrs = info->sechdrs;
2220
2221 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2222 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2223 return 'v';
2224 else
2225 return 'w';
2226 }
2227 if (sym->st_shndx == SHN_UNDEF)
2228 return 'U';
2229 if (sym->st_shndx == SHN_ABS)
2230 return 'a';
2231 if (sym->st_shndx >= SHN_LORESERVE)
2232 return '?';
2233 if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2234 return 't';
2235 if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2236 && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2237 if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2238 return 'r';
2239 else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2240 return 'g';
2241 else
2242 return 'd';
2243 }
2244 if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2245 if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2246 return 's';
2247 else
2248 return 'b';
2249 }
2250 if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2251 ".debug")) {
2252 return 'n';
2253 }
2254 return '?';
2255 }
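
/*
 * For instance (section names illustrative), this classifies a symbol
 * in .text as 't', .rodata as 'r', .data as 'd' and .bss as 'b',
 * mirroring the lowercase type letters printed by nm(1).
 */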
2256
2257 static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2258 unsigned int shnum)
2259 {
2260 const Elf_Shdr *sec;
2261
2262 if (src->st_shndx == SHN_UNDEF
2263 || src->st_shndx >= shnum
2264 || !src->st_name)
2265 return false;
2266
2267 sec = sechdrs + src->st_shndx;
2268 if (!(sec->sh_flags & SHF_ALLOC)
2269 #ifndef CONFIG_KALLSYMS_ALL
2270 || !(sec->sh_flags & SHF_EXECINSTR)
2271 #endif
2272 || (sec->sh_entsize & INIT_OFFSET_MASK))
2273 return false;
2274
2275 return true;
2276 }
2277
2278 /*
2279 * We only allocate and copy the strings needed by the parts of symtab
2280 * we keep. This is simple, but has the effect of making multiple
2281 * copies of duplicates. We could be more sophisticated, see
2282 * linux-kernel thread starting with
2283 * <73defb5e4bca04a6431392cc341112b1@localhost>.
2284 */
2285 static void layout_symtab(struct module *mod, struct load_info *info)
2286 {
2287 Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2288 Elf_Shdr *strsect = info->sechdrs + info->index.str;
2289 const Elf_Sym *src;
2290 unsigned int i, nsrc, ndst, strtab_size = 0;
2291
2292 /* Put symbol section at end of init part of module. */
2293 symsect->sh_flags |= SHF_ALLOC;
2294 symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
2295 info->index.sym) | INIT_OFFSET_MASK;
2296 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
2297
2298 src = (void *)info->hdr + symsect->sh_offset;
2299 nsrc = symsect->sh_size / sizeof(*src);
2300
2301 /* Compute total space required for the core symbols' strtab. */
2302 for (ndst = i = 0; i < nsrc; i++) {
2303 if (i == 0 ||
2304 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2305 strtab_size += strlen(&info->strtab[src[i].st_name])+1;
2306 ndst++;
2307 }
2308 }
2309
2310 /* Append room for core symbols at end of core part. */
2311 info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
2312 info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
2313 mod->core_size += strtab_size;
2314
2315 /* Put string table section at end of init part of module. */
2316 strsect->sh_flags |= SHF_ALLOC;
2317 strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
2318 info->index.str) | INIT_OFFSET_MASK;
2319 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2320 }
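
/*
 * Net effect, sketched: the full symtab and strtab travel in the init
 * region (INIT_OFFSET_MASK set) and are thrown away after init, while
 * room for the trimmed-down core symbols and their strings is reserved
 * at symoffs/stroffs in the core region for add_kallsyms() to fill.
 */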
2321
2322 static void add_kallsyms(struct module *mod, const struct load_info *info)
2323 {
2324 unsigned int i, ndst;
2325 const Elf_Sym *src;
2326 Elf_Sym *dst;
2327 char *s;
2328 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2329
2330 mod->symtab = (void *)symsec->sh_addr;
2331 mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2332 /* Make sure we get permanent strtab: don't use info->strtab. */
2333 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2334
2335 /* Set types up while we still have access to sections. */
2336 for (i = 0; i < mod->num_symtab; i++)
2337 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
2338
2339 mod->core_symtab = dst = mod->module_core + info->symoffs;
2340 mod->core_strtab = s = mod->module_core + info->stroffs;
2341 src = mod->symtab;
2342 for (ndst = i = 0; i < mod->num_symtab; i++) {
2343 if (i == 0 ||
2344 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2345 dst[ndst] = src[i];
2346 dst[ndst++].st_name = s - mod->core_strtab;
2347 s += strlcpy(s, &mod->strtab[src[i].st_name],
2348 KSYM_NAME_LEN) + 1;
2349 }
2350 }
2351 mod->core_num_syms = ndst;
2352 }
2353 #else
2354 static inline void layout_symtab(struct module *mod, struct load_info *info)
2355 {
2356 }
2357
2358 static void add_kallsyms(struct module *mod, const struct load_info *info)
2359 {
2360 }
2361 #endif /* CONFIG_KALLSYMS */
2362
2363 static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
2364 {
2365 if (!debug)
2366 return;
2367 #ifdef CONFIG_DYNAMIC_DEBUG
2368 if (ddebug_add_module(debug, num, debug->modname))
2369 pr_err("dynamic debug error adding module: %s\n",
2370 debug->modname);
2371 #endif
2372 }
2373
2374 static void dynamic_debug_remove(struct _ddebug *debug)
2375 {
2376 if (debug)
2377 ddebug_remove_module(debug->modname);
2378 }
2379
2380 void * __weak module_alloc(unsigned long size)
2381 {
2382 return vmalloc_exec(size);
2383 }
2384
2385 static void *module_alloc_update_bounds(unsigned long size)
2386 {
2387 void *ret = module_alloc(size);
2388
2389 if (ret) {
2390 mutex_lock(&module_mutex);
2391 /* Update module bounds. */
2392 if ((unsigned long)ret < module_addr_min)
2393 module_addr_min = (unsigned long)ret;
2394 if ((unsigned long)ret + size > module_addr_max)
2395 module_addr_max = (unsigned long)ret + size;
2396 mutex_unlock(&module_mutex);
2397 }
2398 return ret;
2399 }
2400
2401 #ifdef CONFIG_DEBUG_KMEMLEAK
2402 static void kmemleak_load_module(const struct module *mod,
2403 const struct load_info *info)
2404 {
2405 unsigned int i;
2406
2407 /* only scan the sections containing data */
2408 kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2409
2410 for (i = 1; i < info->hdr->e_shnum; i++) {
2411 /* Scan all writable sections that are not executable */
2412 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
2413 !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
2414 (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
2415 continue;
2416
2417 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2418 info->sechdrs[i].sh_size, GFP_KERNEL);
2419 }
2420 }
2421 #else
2422 static inline void kmemleak_load_module(const struct module *mod,
2423 const struct load_info *info)
2424 {
2425 }
2426 #endif
2427
2428 #ifdef CONFIG_MODULE_SIG
2429 static int module_sig_check(struct load_info *info)
2430 {
2431 int err = -ENOKEY;
2432 const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2433 const void *mod = info->hdr;
2434
2435 if (info->len > markerlen &&
2436 memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
2437 /* We truncate the module to discard the signature */
2438 info->len -= markerlen;
2439 err = mod_verify_sig(mod, &info->len);
2440 }
2441
2442 if (!err) {
2443 info->sig_ok = true;
2444 return 0;
2445 }
2446
2447 /* Not having a signature is only an error if we're strict. */
2448 if (err == -ENOKEY && !sig_enforce)
2449 err = 0;
2450
2451 return err;
2452 }
2453 #else /* !CONFIG_MODULE_SIG */
2454 static int module_sig_check(struct load_info *info)
2455 {
2456 return 0;
2457 }
2458 #endif /* !CONFIG_MODULE_SIG */
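
/*
 * On-disk layout of a signed module, sketched (sizes illustrative):
 *
 *	[ ELF image | signature | struct module_signature | marker ]
 *
 * where the marker is MODULE_SIG_STRING.  The memcmp() above strips
 * the marker and mod_verify_sig() then peels off the rest, leaving
 * info->len covering just the ELF image.
 */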
2459
2460 /* Sanity checks against invalid binaries, wrong arch, weird elf version. */
2461 static int elf_header_check(struct load_info *info)
2462 {
2463 if (info->len < sizeof(*(info->hdr)))
2464 return -ENOEXEC;
2465
2466 if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
2467 || info->hdr->e_type != ET_REL
2468 || !elf_check_arch(info->hdr)
2469 || info->hdr->e_shentsize != sizeof(Elf_Shdr))
2470 return -ENOEXEC;
2471
2472 if (info->hdr->e_shoff >= info->len
2473 || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
2474 info->len - info->hdr->e_shoff))
2475 return -ENOEXEC;
2476
2477 return 0;
2478 }
2479
2480 /* Sets info->hdr and info->len. */
2481 static int copy_module_from_user(const void __user *umod, unsigned long len,
2482 struct load_info *info)
2483 {
2484 int err;
2485
2486 info->len = len;
2487 if (info->len < sizeof(*(info->hdr)))
2488 return -ENOEXEC;
2489
2490 err = security_kernel_module_from_file(NULL);
2491 if (err)
2492 return err;
2493
2494 /* Suck in entire file: we'll want most of it. */
2495 info->hdr = vmalloc(info->len);
2496 if (!info->hdr)
2497 return -ENOMEM;
2498
2499 if (copy_from_user(info->hdr, umod, info->len) != 0) {
2500 vfree(info->hdr);
2501 return -EFAULT;
2502 }
2503
2504 return 0;
2505 }
2506
2507 /* Sets info->hdr and info->len. */
2508 static int copy_module_from_fd(int fd, struct load_info *info)
2509 {
2510 struct fd f = fdget(fd);
2511 int err;
2512 struct kstat stat;
2513 loff_t pos;
2514 ssize_t bytes = 0;
2515
2516 if (!f.file)
2517 return -ENOEXEC;
2518
2519 err = security_kernel_module_from_file(f.file);
2520 if (err)
2521 goto out;
2522
2523 err = vfs_getattr(&f.file->f_path, &stat);
2524 if (err)
2525 goto out;
2526
2527 if (stat.size > INT_MAX) {
2528 err = -EFBIG;
2529 goto out;
2530 }
2531
2532 /* Don't hand 0 to vmalloc, it whines. */
2533 if (stat.size == 0) {
2534 err = -EINVAL;
2535 goto out;
2536 }
2537
2538 info->hdr = vmalloc(stat.size);
2539 if (!info->hdr) {
2540 err = -ENOMEM;
2541 goto out;
2542 }
2543
2544 pos = 0;
2545 while (pos < stat.size) {
2546 bytes = kernel_read(f.file, pos, (char *)(info->hdr) + pos,
2547 stat.size - pos);
2548 if (bytes < 0) {
2549 vfree(info->hdr);
2550 err = bytes;
2551 goto out;
2552 }
2553 if (bytes == 0)
2554 break;
2555 pos += bytes;
2556 }
2557 info->len = pos;
2558
2559 out:
2560 fdput(f);
2561 return err;
2562 }
2563
2564 static void free_copy(struct load_info *info)
2565 {
2566 vfree(info->hdr);
2567 }
2568
2569 static int rewrite_section_headers(struct load_info *info, int flags)
2570 {
2571 unsigned int i;
2572
2573 /* This should always be true, but let's be sure. */
2574 info->sechdrs[0].sh_addr = 0;
2575
2576 for (i = 1; i < info->hdr->e_shnum; i++) {
2577 Elf_Shdr *shdr = &info->sechdrs[i];
2578 if (shdr->sh_type != SHT_NOBITS
2579 && info->len < shdr->sh_offset + shdr->sh_size) {
2580 pr_err("Module len %lu truncated\n", info->len);
2581 return -ENOEXEC;
2582 }
2583
2584 /* Point each section's sh_addr at its location in the
2585 temporary image. */
2586 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
2587
2588 #ifndef CONFIG_MODULE_UNLOAD
2589 /* Don't load .exit sections */
2590 if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
2591 shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
2592 #endif
2593 }
2594
2595 /* Track but don't keep modinfo and version sections. */
2596 if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
2597 info->index.vers = 0; /* Pretend no __versions section! */
2598 else
2599 info->index.vers = find_sec(info, "__versions");
2600 info->index.info = find_sec(info, ".modinfo");
2601 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2602 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
2603 return 0;
2604 }
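
/*
 * After this pass every sh_addr points into the vmalloc'd copy of the
 * image, i.e. sh_addr = info->hdr + sh_offset (a sketch; values depend
 * on where vmalloc placed the copy).  move_module() later rewrites
 * them once more to the final locations.
 */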
2605
2606 /*
2607 * Set up our basic convenience variables (pointers to section headers,
2608 * search for module section index etc), and do some basic section
2609 * verification.
2610 *
2611 * Return the temporary module pointer (we'll replace it with the final
2612 * one when we move the module sections around).
2613 */
2614 static struct module *setup_load_info(struct load_info *info, int flags)
2615 {
2616 unsigned int i;
2617 int err;
2618 struct module *mod;
2619
2620 /* Set up the convenience variables */
2621 info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
2622 info->secstrings = (void *)info->hdr
2623 + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
2624
2625 err = rewrite_section_headers(info, flags);
2626 if (err)
2627 return ERR_PTR(err);
2628
2629 /* Find internal symbols and strings. */
2630 for (i = 1; i < info->hdr->e_shnum; i++) {
2631 if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
2632 info->index.sym = i;
2633 info->index.str = info->sechdrs[i].sh_link;
2634 info->strtab = (char *)info->hdr
2635 + info->sechdrs[info->index.str].sh_offset;
2636 break;
2637 }
2638 }
2639
2640 info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
2641 if (!info->index.mod) {
2642 pr_warn("No module found in object\n");
2643 return ERR_PTR(-ENOEXEC);
2644 }
2645 /* This is temporary: point mod into copy of data. */
2646 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2647
2648 if (info->index.sym == 0) {
2649 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
2650 return ERR_PTR(-ENOEXEC);
2651 }
2652
2653 info->index.pcpu = find_pcpusec(info);
2654
2655 /* Check module struct version now, before we try to use module. */
2656 if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
2657 return ERR_PTR(-ENOEXEC);
2658
2659 return mod;
2660 }
2661
2662 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
2663 {
2664 const char *modmagic = get_modinfo(info, "vermagic");
2665 int err;
2666
2667 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
2668 modmagic = NULL;
2669
2670 /* This is allowed: modprobe --force will invalidate it. */
2671 if (!modmagic) {
2672 err = try_to_force_load(mod, "bad vermagic");
2673 if (err)
2674 return err;
2675 } else if (!same_magic(modmagic, vermagic, info->index.vers)) {
2676 pr_err("%s: version magic '%s' should be '%s'\n",
2677 mod->name, modmagic, vermagic);
2678 return -ENOEXEC;
2679 }
2680
2681 if (!get_modinfo(info, "intree"))
2682 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
2683
2684 if (get_modinfo(info, "staging")) {
2685 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
2686 pr_warn("%s: module is from the staging directory, the quality "
2687 "is unknown, you have been warned.\n", mod->name);
2688 }
2689
2690 /* Set up license info based on the info section */
2691 set_license(mod, get_modinfo(info, "license"));
2692
2693 return 0;
2694 }
2695
2696 static int find_module_sections(struct module *mod, struct load_info *info)
2697 {
2698 mod->kp = section_objs(info, "__param",
2699 sizeof(*mod->kp), &mod->num_kp);
2700 mod->syms = section_objs(info, "__ksymtab",
2701 sizeof(*mod->syms), &mod->num_syms);
2702 mod->crcs = section_addr(info, "__kcrctab");
2703 mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
2704 sizeof(*mod->gpl_syms),
2705 &mod->num_gpl_syms);
2706 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
2707 mod->gpl_future_syms = section_objs(info,
2708 "__ksymtab_gpl_future",
2709 sizeof(*mod->gpl_future_syms),
2710 &mod->num_gpl_future_syms);
2711 mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
2712
2713 #ifdef CONFIG_UNUSED_SYMBOLS
2714 mod->unused_syms = section_objs(info, "__ksymtab_unused",
2715 sizeof(*mod->unused_syms),
2716 &mod->num_unused_syms);
2717 mod->unused_crcs = section_addr(info, "__kcrctab_unused");
2718 mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
2719 sizeof(*mod->unused_gpl_syms),
2720 &mod->num_unused_gpl_syms);
2721 mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
2722 #endif
2723 #ifdef CONFIG_CONSTRUCTORS
2724 mod->ctors = section_objs(info, ".ctors",
2725 sizeof(*mod->ctors), &mod->num_ctors);
2726 if (!mod->ctors)
2727 mod->ctors = section_objs(info, ".init_array",
2728 sizeof(*mod->ctors), &mod->num_ctors);
2729 else if (find_sec(info, ".init_array")) {
2730 /*
2731 * This shouldn't happen with same compiler and binutils
2732 * building all parts of the module.
2733 */
2734 pr_warn("%s: has both .ctors and .init_array.\n",
2735 mod->name);
2736 return -EINVAL;
2737 }
2738 #endif
2739
2740 #ifdef CONFIG_TRACEPOINTS
2741 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
2742 sizeof(*mod->tracepoints_ptrs),
2743 &mod->num_tracepoints);
2744 #endif
2745 #ifdef HAVE_JUMP_LABEL
2746 mod->jump_entries = section_objs(info, "__jump_table",
2747 sizeof(*mod->jump_entries),
2748 &mod->num_jump_entries);
2749 #endif
2750 #ifdef CONFIG_EVENT_TRACING
2751 mod->trace_events = section_objs(info, "_ftrace_events",
2752 sizeof(*mod->trace_events),
2753 &mod->num_trace_events);
2754 #endif
2755 #ifdef CONFIG_TRACING
2756 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
2757 sizeof(*mod->trace_bprintk_fmt_start),
2758 &mod->num_trace_bprintk_fmt);
2759 #endif
2760 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
2761 /* sechdrs[0].sh_size is always zero */
2762 mod->ftrace_callsites = section_objs(info, "__mcount_loc",
2763 sizeof(*mod->ftrace_callsites),
2764 &mod->num_ftrace_callsites);
2765 #endif
2766
2767 mod->extable = section_objs(info, "__ex_table",
2768 sizeof(*mod->extable), &mod->num_exentries);
2769
2770 if (section_addr(info, "__obsparm"))
2771 pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
2772
2773 info->debug = section_objs(info, "__verbose",
2774 sizeof(*info->debug), &info->num_debug);
2775
2776 return 0;
2777 }
2778
2779 static int move_module(struct module *mod, struct load_info *info)
2780 {
2781 int i;
2782 void *ptr;
2783
2784 /* Do the allocs. */
2785 ptr = module_alloc_update_bounds(mod->core_size);
2786 /*
2787 * The pointer to this block is stored in the module structure
2788 * which is inside the block. Just mark it as not being a
2789 * leak.
2790 */
2791 kmemleak_not_leak(ptr);
2792 if (!ptr)
2793 return -ENOMEM;
2794
2795 memset(ptr, 0, mod->core_size);
2796 mod->module_core = ptr;
2797
2798 if (mod->init_size) {
2799 ptr = module_alloc_update_bounds(mod->init_size);
2800 /*
2801 * The pointer to this block is stored in the module structure
2802 * which is inside the block. This block doesn't need to be
2803 * scanned as it contains data and code that will be freed
2804 * after the module is initialized.
2805 */
2806 kmemleak_ignore(ptr);
2807 if (!ptr) {
2808 module_memfree(mod->module_core);
2809 return -ENOMEM;
2810 }
2811 memset(ptr, 0, mod->init_size);
2812 mod->module_init = ptr;
2813 } else
2814 mod->module_init = NULL;
2815
2816 /* Transfer each section which specifies SHF_ALLOC */
2817 pr_debug("final section addresses:\n");
2818 for (i = 0; i < info->hdr->e_shnum; i++) {
2819 void *dest;
2820 Elf_Shdr *shdr = &info->sechdrs[i];
2821
2822 if (!(shdr->sh_flags & SHF_ALLOC))
2823 continue;
2824
2825 if (shdr->sh_entsize & INIT_OFFSET_MASK)
2826 dest = mod->module_init
2827 + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
2828 else
2829 dest = mod->module_core + shdr->sh_entsize;
2830
2831 if (shdr->sh_type != SHT_NOBITS)
2832 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
2833 /* Update sh_addr to point to copy in image. */
2834 shdr->sh_addr = (unsigned long)dest;
2835 pr_debug("\t0x%lx %s\n",
2836 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
2837 }
2838
2839 return 0;
2840 }
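
/*
 * Decoding sketch: with 64-bit longs INIT_OFFSET_MASK is bit 63, so an
 * sh_entsize of (0x40 | INIT_OFFSET_MASK) above means "offset 0x40
 * into module_init", while a plain 0x40 is an offset into module_core.
 */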
2841
2842 static int check_module_license_and_versions(struct module *mod)
2843 {
2844 /*
2845 * ndiswrapper is under GPL by itself, but loads proprietary modules.
2846 * Don't use add_taint_module(), as it would prevent ndiswrapper from
2847 * using GPL-only symbols it needs.
2848 */
2849 if (strcmp(mod->name, "ndiswrapper") == 0)
2850 add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
2851
2852 /* driverloader was caught wrongly pretending to be under GPL */
2853 if (strcmp(mod->name, "driverloader") == 0)
2854 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2855 LOCKDEP_NOW_UNRELIABLE);
2856
2857 /* lve claims to be GPL but upstream won't provide source */
2858 if (strcmp(mod->name, "lve") == 0)
2859 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2860 LOCKDEP_NOW_UNRELIABLE);
2861
2862 #ifdef CONFIG_MODVERSIONS
2863 if ((mod->num_syms && !mod->crcs)
2864 || (mod->num_gpl_syms && !mod->gpl_crcs)
2865 || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
2866 #ifdef CONFIG_UNUSED_SYMBOLS
2867 || (mod->num_unused_syms && !mod->unused_crcs)
2868 || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
2869 #endif
2870 ) {
2871 return try_to_force_load(mod,
2872 "no versions for exported symbols");
2873 }
2874 #endif
2875 return 0;
2876 }
2877
2878 static void flush_module_icache(const struct module *mod)
2879 {
2880 mm_segment_t old_fs;
2881
2882 /* flush the icache in correct context */
2883 old_fs = get_fs();
2884 set_fs(KERNEL_DS);
2885
2886 /*
2887 * Flush the instruction cache, since we've played with text.
2888 * Do it before processing of module parameters, so the module
2889 * can provide parameter accessor functions of its own.
2890 */
2891 if (mod->module_init)
2892 flush_icache_range((unsigned long)mod->module_init,
2893 (unsigned long)mod->module_init
2894 + mod->init_size);
2895 flush_icache_range((unsigned long)mod->module_core,
2896 (unsigned long)mod->module_core + mod->core_size);
2897
2898 set_fs(old_fs);
2899 }
2900
2901 int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
2902 Elf_Shdr *sechdrs,
2903 char *secstrings,
2904 struct module *mod)
2905 {
2906 return 0;
2907 }
2908
2909 static struct module *layout_and_allocate(struct load_info *info, int flags)
2910 {
2911 /* Module within temporary copy. */
2912 struct module *mod;
2913 int err;
2914
2915 mod = setup_load_info(info, flags);
2916 if (IS_ERR(mod))
2917 return mod;
2918
2919 err = check_modinfo(mod, info, flags);
2920 if (err)
2921 return ERR_PTR(err);
2922
2923 /* Allow arches to frob section contents and sizes. */
2924 err = module_frob_arch_sections(info->hdr, info->sechdrs,
2925 info->secstrings, mod);
2926 if (err < 0)
2927 return ERR_PTR(err);
2928
2929 /* We will do a special allocation for per-cpu sections later. */
2930 info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
2931
2932 /* Determine total sizes, and put offsets in sh_entsize. For now
2933 this is done generically; there don't appear to be any
2934 special cases for the architectures. */
2935 layout_sections(mod, info);
2936 layout_symtab(mod, info);
2937
2938 /* Allocate and move to the final place */
2939 err = move_module(mod, info);
2940 if (err)
2941 return ERR_PTR(err);
2942
2943 /* Module has been copied to its final place now: return it. */
2944 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2945 kmemleak_load_module(mod, info);
2946 return mod;
2947 }
2948
2949 /* mod is no longer valid after this! */
2950 static void module_deallocate(struct module *mod, struct load_info *info)
2951 {
2952 percpu_modfree(mod);
2953 module_arch_freeing_init(mod);
2954 module_memfree(mod->module_init);
2955 module_memfree(mod->module_core);
2956 }
2957
2958 int __weak module_finalize(const Elf_Ehdr *hdr,
2959 const Elf_Shdr *sechdrs,
2960 struct module *me)
2961 {
2962 return 0;
2963 }
2964
2965 static int post_relocation(struct module *mod, const struct load_info *info)
2966 {
2967 /* Sort exception table now relocations are done. */
2968 sort_extable(mod->extable, mod->extable + mod->num_exentries);
2969
2970 /* Copy relocated percpu area over. */
2971 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
2972 info->sechdrs[info->index.pcpu].sh_size);
2973
2974 /* Setup kallsyms-specific fields. */
2975 add_kallsyms(mod, info);
2976
2977 /* Arch-specific module finalizing. */
2978 return module_finalize(info->hdr, info->sechdrs, mod);
2979 }
2980
2981 /* Is this module of this name done loading? No locks held. */
2982 static bool finished_loading(const char *name)
2983 {
2984 struct module *mod;
2985 bool ret;
2986
2987 /*
2988 * The module_mutex should not be a heavily contended lock;
2989 * if we get the occasional sleep here, we'll go an extra iteration
2990 * in the wait_event_interruptible(), which is harmless.
2991 */
2992 sched_annotate_sleep();
2993 mutex_lock(&module_mutex);
2994 mod = find_module_all(name, strlen(name), true);
2995 ret = !mod || mod->state == MODULE_STATE_LIVE
2996 || mod->state == MODULE_STATE_GOING;
2997 mutex_unlock(&module_mutex);
2998
2999 return ret;
3000 }
3001
3002 /* Call module constructors. */
3003 static void do_mod_ctors(struct module *mod)
3004 {
3005 #ifdef CONFIG_CONSTRUCTORS
3006 unsigned long i;
3007
3008 for (i = 0; i < mod->num_ctors; i++)
3009 mod->ctors[i]();
3010 #endif
3011 }
3012
3013 /* For freeing module_init on success, in case kallsyms is still traversing it */
3014 struct mod_initfree {
3015 struct rcu_head rcu;
3016 void *module_init;
3017 };
3018
3019 static void do_free_init(struct rcu_head *head)
3020 {
3021 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
3022 module_memfree(m->module_init);
3023 kfree(m);
3024 }
3025
3026 /* This is where the real work happens */
3027 static int do_init_module(struct module *mod)
3028 {
3029 int ret = 0;
3030 struct mod_initfree *freeinit;
3031
3032 freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
3033 if (!freeinit) {
3034 ret = -ENOMEM;
3035 goto fail;
3036 }
3037 freeinit->module_init = mod->module_init;
3038
3039 /*
3040 * We want to find out whether @mod uses async during init. Clear
3041 * PF_USED_ASYNC. async_schedule*() will set it.
3042 */
3043 current->flags &= ~PF_USED_ASYNC;
3044
3045 do_mod_ctors(mod);
3046 /* Start the module */
3047 if (mod->init != NULL)
3048 ret = do_one_initcall(mod->init);
3049 if (ret < 0) {
3050 goto fail_free_freeinit;
3051 }
3052 if (ret > 0) {
3053 pr_warn("%s: '%s'->init suspiciously returned %d, it should "
3054 "follow 0/-E convention\n"
3055 "%s: loading module anyway...\n",
3056 __func__, mod->name, ret, __func__);
3057 dump_stack();
3058 }
3059
3060 /* Now it's a first class citizen! */
3061 mod->state = MODULE_STATE_LIVE;
3062 blocking_notifier_call_chain(&module_notify_list,
3063 MODULE_STATE_LIVE, mod);
3064
3065 /*
3066 * We need to finish all async code before the module init sequence
3067 * is done. This has potential to deadlock. For example, a newly
3068 * detected block device can trigger request_module() of the
3069 * default iosched from async probing task. Once userland helper
3070 * reaches here, async_synchronize_full() will wait on the async
3071 * task waiting on request_module() and deadlock.
3072 *
3073 * This deadlock is avoided by performing async_synchronize_full()
3074 * iff module init queued any async jobs. This isn't a full
3075 * solution as it will deadlock the same if module loading from
3076 * async jobs nests more than once; however, due to the various
3077 * constraints, this hack seems to be the best option for now.
3078 * Please refer to the following thread for details.
3079 *
3080 * http://thread.gmane.org/gmane.linux.kernel/1420814
3081 */
3082 if (current->flags & PF_USED_ASYNC)
3083 async_synchronize_full();
3084
3085 mutex_lock(&module_mutex);
3086 /* Drop initial reference. */
3087 module_put(mod);
3088 trim_init_extable(mod);
3089 #ifdef CONFIG_KALLSYMS
3090 mod->num_symtab = mod->core_num_syms;
3091 mod->symtab = mod->core_symtab;
3092 mod->strtab = mod->core_strtab;
3093 #endif
3094 unset_module_init_ro_nx(mod);
3095 module_arch_freeing_init(mod);
3096 mod->module_init = NULL;
3097 mod->init_size = 0;
3098 mod->init_ro_size = 0;
3099 mod->init_text_size = 0;
3100 /*
3101 * We want to free module_init, but be aware that kallsyms may be
3102 * walking this with preempt disabled. In all the failure paths,
3103 * we call synchronize_rcu/synchronize_sched, but we don't want
3104 * to slow down the success path, so use actual RCU here.
3105 */
3106 call_rcu(&freeinit->rcu, do_free_init);
3107 mutex_unlock(&module_mutex);
3108 wake_up_all(&module_wq);
3109
3110 return 0;
3111
3112 fail_free_freeinit:
3113 kfree(freeinit);
3114 fail:
3115 /* Try to protect us from buggy refcounters. */
3116 mod->state = MODULE_STATE_GOING;
3117 synchronize_sched();
3118 module_put(mod);
3119 blocking_notifier_call_chain(&module_notify_list,
3120 MODULE_STATE_GOING, mod);
3121 free_module(mod);
3122 wake_up_all(&module_wq);
3123 return ret;
3124 }
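
/*
 * Lifecycle sketch of the success path above: constructors run,
 * mod->init() runs, the module goes MODULE_STATE_LIVE, async work
 * queued by init is flushed, and the init text/data are handed to RCU
 * for freeing once any concurrent kallsyms walkers are done.
 */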
3125
3126 static int may_init_module(void)
3127 {
3128 if (!capable(CAP_SYS_MODULE) || modules_disabled)
3129 return -EPERM;
3130
3131 return 0;
3132 }
3133
3134 /*
3135 * We try to place it in the list now to make sure it's unique before
3136 * we dedicate too many resources to it. In particular, this avoids
3137 * exhausting temporary percpu memory on a duplicate load.
3138 */
3139 static int add_unformed_module(struct module *mod)
3140 {
3141 int err;
3142 struct module *old;
3143
3144 mod->state = MODULE_STATE_UNFORMED;
3145
3146 again:
3147 mutex_lock(&module_mutex);
3148 old = find_module_all(mod->name, strlen(mod->name), true);
3149 if (old != NULL) {
3150 if (old->state == MODULE_STATE_COMING
3151 || old->state == MODULE_STATE_UNFORMED) {
3152 /* Wait in case it fails to load. */
3153 mutex_unlock(&module_mutex);
3154 err = wait_event_interruptible(module_wq,
3155 finished_loading(mod->name));
3156 if (err)
3157 goto out_unlocked;
3158 goto again;
3159 }
3160 err = -EEXIST;
3161 goto out;
3162 }
3163 list_add_rcu(&mod->list, &modules);
3164 err = 0;
3165
3166 out:
3167 mutex_unlock(&module_mutex);
3168 out_unlocked:
3169 return err;
3170 }
3171
3172 static int complete_formation(struct module *mod, struct load_info *info)
3173 {
3174 int err;
3175
3176 mutex_lock(&module_mutex);
3177
3178 /* Find duplicate symbols (must be called under lock). */
3179 err = verify_export_symbols(mod);
3180 if (err < 0)
3181 goto out;
3182
3183 /* This relies on module_mutex for list integrity. */
3184 module_bug_finalize(info->hdr, info->sechdrs, mod);
3185
3186 /* Set RO and NX regions for core */
3187 set_section_ro_nx(mod->module_core,
3188 mod->core_text_size,
3189 mod->core_ro_size,
3190 mod->core_size);
3191
3192 /* Set RO and NX regions for init */
3193 set_section_ro_nx(mod->module_init,
3194 mod->init_text_size,
3195 mod->init_ro_size,
3196 mod->init_size);
3197
3198 /* Mark state as coming so strong_try_module_get() ignores us,
3199 * but kallsyms etc. can see us. */
3200 mod->state = MODULE_STATE_COMING;
3201 mutex_unlock(&module_mutex);
3202
3203 blocking_notifier_call_chain(&module_notify_list,
3204 MODULE_STATE_COMING, mod);
3205 return 0;
3206
3207 out:
3208 mutex_unlock(&module_mutex);
3209 return err;
3210 }
3211
3212 static int unknown_module_param_cb(char *param, char *val, const char *modname)
3213 {
3214 /* Check for magic 'dyndbg' arg */
3215 int ret = ddebug_dyndbg_module_param_cb(param, val, modname);
3216 if (ret != 0)
3217 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
3218 return 0;
3219 }
3220
3221 /* Allocate and load the module: note that size of section 0 is always
3222 zero, and we rely on this for optional sections. */
3223 static int load_module(struct load_info *info, const char __user *uargs,
3224 int flags)
3225 {
3226 struct module *mod;
3227 long err;
3228 char *after_dashes;
3229
3230 err = module_sig_check(info);
3231 if (err)
3232 goto free_copy;
3233
3234 err = elf_header_check(info);
3235 if (err)
3236 goto free_copy;
3237
3238 /* Figure out module layout, and allocate all the memory. */
3239 mod = layout_and_allocate(info, flags);
3240 if (IS_ERR(mod)) {
3241 err = PTR_ERR(mod);
3242 goto free_copy;
3243 }
3244
3245 /* Reserve our place in the list. */
3246 err = add_unformed_module(mod);
3247 if (err)
3248 goto free_module;
3249
3250 #ifdef CONFIG_MODULE_SIG
3251 mod->sig_ok = info->sig_ok;
3252 if (!mod->sig_ok) {
3253 pr_notice_once("%s: module verification failed: signature "
3254 "and/or required key missing - tainting "
3255 "kernel\n", mod->name);
3256 add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
3257 }
3258 #endif
3259
3260 /* To avoid stressing percpu allocator, do this once we're unique. */
3261 err = percpu_modalloc(mod, info);
3262 if (err)
3263 goto unlink_mod;
3264
3265 /* Now module is in final location, initialize linked lists, etc. */
3266 err = module_unload_init(mod);
3267 if (err)
3268 goto unlink_mod;
3269
3270 /* Now that we've got everything in the final locations, we can
3271 * find optional sections. */
3272 err = find_module_sections(mod, info);
3273 if (err)
3274 goto free_unload;
3275
3276 err = check_module_license_and_versions(mod);
3277 if (err)
3278 goto free_unload;
3279
3280 /* Set up MODINFO_ATTR fields */
3281 setup_modinfo(mod, info);
3282
3283 /* Fix up syms, so that st_value is a pointer to location. */
3284 err = simplify_symbols(mod, info);
3285 if (err < 0)
3286 goto free_modinfo;
3287
3288 err = apply_relocations(mod, info);
3289 if (err < 0)
3290 goto free_modinfo;
3291
3292 err = post_relocation(mod, info);
3293 if (err < 0)
3294 goto free_modinfo;
3295
3296 flush_module_icache(mod);
3297
3298 /* Now copy in args */
3299 mod->args = strndup_user(uargs, ~0UL >> 1);
3300 if (IS_ERR(mod->args)) {
3301 err = PTR_ERR(mod->args);
3302 goto free_arch_cleanup;
3303 }
3304
3305 dynamic_debug_setup(info->debug, info->num_debug);
3306
3307 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
3308 ftrace_module_init(mod);
3309
3310 /* Finally it's fully formed, ready to start executing. */
3311 err = complete_formation(mod, info);
3312 if (err)
3313 goto ddebug_cleanup;
3314
3315 /* Module is ready to execute: parsing args may do that. */
3316 after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3317 -32768, 32767, unknown_module_param_cb);
3318 if (IS_ERR(after_dashes)) {
3319 err = PTR_ERR(after_dashes);
3320 goto bug_cleanup;
3321 } else if (after_dashes) {
3322 pr_warn("%s: parameters '%s' after `--' ignored\n",
3323 mod->name, after_dashes);
3324 }
3325
3326 /* Link in to sysfs. */
3327 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3328 if (err < 0)
3329 goto bug_cleanup;
3330
3331 /* Get rid of temporary copy. */
3332 free_copy(info);
3333
3334 /* Done! */
3335 trace_module_load(mod);
3336
3337 return do_init_module(mod);
3338
3339 bug_cleanup:
3340 /* module_bug_cleanup needs module_mutex protection */
3341 mutex_lock(&module_mutex);
3342 module_bug_cleanup(mod);
3343 mutex_unlock(&module_mutex);
3344
3345 /* Free lock-classes: */
3346 lockdep_free_key_range(mod->module_core, mod->core_size);
3347
3348 /* we can't deallocate the module until we clear memory protection */
3349 unset_module_init_ro_nx(mod);
3350 unset_module_core_ro_nx(mod);
3351
3352 ddebug_cleanup:
3353 dynamic_debug_remove(info->debug);
3354 synchronize_sched();
3355 kfree(mod->args);
3356 free_arch_cleanup:
3357 module_arch_cleanup(mod);
3358 free_modinfo:
3359 free_modinfo(mod);
3360 free_unload:
3361 module_unload_free(mod);
3362 unlink_mod:
3363 mutex_lock(&module_mutex);
3364 /* Unlink carefully: kallsyms could be walking list. */
3365 list_del_rcu(&mod->list);
3366 wake_up_all(&module_wq);
3367 /* Wait for RCU synchronizing before releasing mod->list. */
3368 synchronize_rcu();
3369 mutex_unlock(&module_mutex);
3370 free_module:
3371 module_deallocate(mod, info);
3372 free_copy:
3373 free_copy(info);
3374 return err;
3375 }
3376
3377 SYSCALL_DEFINE3(init_module, void __user *, umod,
3378 unsigned long, len, const char __user *, uargs)
3379 {
3380 int err;
3381 struct load_info info = { };
3382
3383 err = may_init_module();
3384 if (err)
3385 return err;
3386
3387 pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
3388 umod, len, uargs);
3389
3390 err = copy_module_from_user(umod, len, &info);
3391 if (err)
3392 return err;
3393
3394 return load_module(&info, uargs, 0);
3395 }
3396
3397 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
3398 {
3399 int err;
3400 struct load_info info = { };
3401
3402 err = may_init_module();
3403 if (err)
3404 return err;
3405
3406 pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
3407
3408 if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
3409 |MODULE_INIT_IGNORE_VERMAGIC))
3410 return -EINVAL;
3411
3412 err = copy_module_from_fd(fd, &info);
3413 if (err)
3414 return err;
3415
3416 return load_module(&info, uargs, flags);
3417 }
3418
3419 static inline int within(unsigned long addr, void *start, unsigned long size)
3420 {
3421 return ((void *)addr >= start && (void *)addr < start + size);
3422 }
3423
3424 #ifdef CONFIG_KALLSYMS
3425 /*
3426 * This ignores the intensely annoying "mapping symbols" found
3427 * in ARM ELF files: $a, $t and $d.
3428 */
3429 static inline int is_arm_mapping_symbol(const char *str)
3430 {
3431 if (str[0] == '.' && str[1] == 'L')
3432 return true;
3433 return str[0] == '$' && strchr("axtd", str[1])
3434 && (str[2] == '\0' || str[2] == '.');
3435 }
3436
3437 static const char *get_ksymbol(struct module *mod,
3438 unsigned long addr,
3439 unsigned long *size,
3440 unsigned long *offset)
3441 {
3442 unsigned int i, best = 0;
3443 unsigned long nextval;
3444
3445 /* At worst, the next value is at the end of the module */
3446 if (within_module_init(addr, mod))
3447 nextval = (unsigned long)mod->module_init+mod->init_text_size;
3448 else
3449 nextval = (unsigned long)mod->module_core+mod->core_text_size;
3450
3451 /* Scan for closest preceding symbol, and next symbol. (ELF
3452 starts real symbols at 1). */
3453 for (i = 1; i < mod->num_symtab; i++) {
3454 if (mod->symtab[i].st_shndx == SHN_UNDEF)
3455 continue;
3456
3457 /* We ignore unnamed symbols: they're uninformative
3458 * and inserted at a whim. */
3459 if (mod->symtab[i].st_value <= addr
3460 && mod->symtab[i].st_value > mod->symtab[best].st_value
3461 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3462 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3463 best = i;
3464 if (mod->symtab[i].st_value > addr
3465 && mod->symtab[i].st_value < nextval
3466 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3467 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3468 nextval = mod->symtab[i].st_value;
3469 }
3470
3471 if (!best)
3472 return NULL;
3473
3474 if (size)
3475 *size = nextval - mod->symtab[best].st_value;
3476 if (offset)
3477 *offset = addr - mod->symtab[best].st_value;
3478 return mod->strtab + mod->symtab[best].st_name;
3479 }
3480
3481 /* For kallsyms to ask for address resolution. NULL means not found. Be
3482 * careful not to lock: to avoid deadlock on oopses, simply disable preemption. */
3483 const char *module_address_lookup(unsigned long addr,
3484 unsigned long *size,
3485 unsigned long *offset,
3486 char **modname,
3487 char *namebuf)
3488 {
3489 struct module *mod;
3490 const char *ret = NULL;
3491
3492 preempt_disable();
3493 list_for_each_entry_rcu(mod, &modules, list) {
3494 if (mod->state == MODULE_STATE_UNFORMED)
3495 continue;
3496 if (within_module(addr, mod)) {
3497 if (modname)
3498 *modname = mod->name;
3499 ret = get_ksymbol(mod, addr, size, offset);
3500 break;
3501 }
3502 }
3503 /* Make a copy in here where it's safe */
3504 if (ret) {
3505 strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
3506 ret = namebuf;
3507 }
3508 preempt_enable();
3509 return ret;
3510 }
3511
3512 int lookup_module_symbol_name(unsigned long addr, char *symname)
3513 {
3514 struct module *mod;
3515
3516 preempt_disable();
3517 list_for_each_entry_rcu(mod, &modules, list) {
3518 if (mod->state == MODULE_STATE_UNFORMED)
3519 continue;
3520 if (within_module(addr, mod)) {
3521 const char *sym;
3522
3523 sym = get_ksymbol(mod, addr, NULL, NULL);
3524 if (!sym)
3525 goto out;
3526 strlcpy(symname, sym, KSYM_NAME_LEN);
3527 preempt_enable();
3528 return 0;
3529 }
3530 }
3531 out:
3532 preempt_enable();
3533 return -ERANGE;
3534 }
3535
3536 int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
3537 unsigned long *offset, char *modname, char *name)
3538 {
3539 struct module *mod;
3540
3541 preempt_disable();
3542 list_for_each_entry_rcu(mod, &modules, list) {
3543 if (mod->state == MODULE_STATE_UNFORMED)
3544 continue;
3545 if (within_module(addr, mod)) {
3546 const char *sym;
3547
3548 sym = get_ksymbol(mod, addr, size, offset);
3549 if (!sym)
3550 goto out;
3551 if (modname)
3552 strlcpy(modname, mod->name, MODULE_NAME_LEN);
3553 if (name)
3554 strlcpy(name, sym, KSYM_NAME_LEN);
3555 preempt_enable();
3556 return 0;
3557 }
3558 }
3559 out:
3560 preempt_enable();
3561 return -ERANGE;
3562 }
3563
3564 int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3565 char *name, char *module_name, int *exported)
3566 {
3567 struct module *mod;
3568
3569 preempt_disable();
3570 list_for_each_entry_rcu(mod, &modules, list) {
3571 if (mod->state == MODULE_STATE_UNFORMED)
3572 continue;
3573 if (symnum < mod->num_symtab) {
3574 *value = mod->symtab[symnum].st_value;
3575 *type = mod->symtab[symnum].st_info;
3576 strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
3577 KSYM_NAME_LEN);
3578 strlcpy(module_name, mod->name, MODULE_NAME_LEN);
3579 *exported = is_exported(name, *value, mod);
3580 preempt_enable();
3581 return 0;
3582 }
3583 symnum -= mod->num_symtab;
3584 }
3585 preempt_enable();
3586 return -ERANGE;
3587 }
3588
3589 static unsigned long mod_find_symname(struct module *mod, const char *name)
3590 {
3591 unsigned int i;
3592
3593 for (i = 0; i < mod->num_symtab; i++)
3594 if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
3595 mod->symtab[i].st_info != 'U')
3596 return mod->symtab[i].st_value;
3597 return 0;
3598 }
3599
3600 /* Look for this name: can be of form module:name. */
3601 unsigned long module_kallsyms_lookup_name(const char *name)
3602 {
3603 struct module *mod;
3604 char *colon;
3605 unsigned long ret = 0;
3606
3607 /* Don't lock: we're in enough trouble already. */
3608 preempt_disable();
3609 if ((colon = strchr(name, ':')) != NULL) {
3610 if ((mod = find_module_all(name, colon - name, false)) != NULL)
3611 ret = mod_find_symname(mod, colon+1);
3612 } else {
3613 list_for_each_entry_rcu(mod, &modules, list) {
3614 if (mod->state == MODULE_STATE_UNFORMED)
3615 continue;
3616 if ((ret = mod_find_symname(mod, name)) != 0)
3617 break;
3618 }
3619 }
3620 preempt_enable();
3621 return ret;
3622 }
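
/*
 * Example (names hypothetical): "usbcore:usb_register_driver" confines
 * the search to usbcore, while a bare "usb_register_driver" walks every
 * module in list order and returns the first match.
 */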
3623
3624 int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3625 struct module *, unsigned long),
3626 void *data)
3627 {
3628 struct module *mod;
3629 unsigned int i;
3630 int ret;
3631
3632 list_for_each_entry(mod, &modules, list) {
3633 if (mod->state == MODULE_STATE_UNFORMED)
3634 continue;
3635 for (i = 0; i < mod->num_symtab; i++) {
3636 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3637 mod, mod->symtab[i].st_value);
3638 if (ret != 0)
3639 return ret;
3640 }
3641 }
3642 return 0;
3643 }
3644 #endif /* CONFIG_KALLSYMS */
3645
3646 static char *module_flags(struct module *mod, char *buf)
3647 {
3648 int bx = 0;
3649
3650 BUG_ON(mod->state == MODULE_STATE_UNFORMED);
3651 if (mod->taints ||
3652 mod->state == MODULE_STATE_GOING ||
3653 mod->state == MODULE_STATE_COMING) {
3654 buf[bx++] = '(';
3655 bx += module_flags_taint(mod, buf + bx);
3656 /* Show a - for module-is-being-unloaded */
3657 if (mod->state == MODULE_STATE_GOING)
3658 buf[bx++] = '-';
3659 /* Show a + for module-is-being-loaded */
3660 if (mod->state == MODULE_STATE_COMING)
3661 buf[bx++] = '+';
3662 buf[bx++] = ')';
3663 }
3664 buf[bx] = '\0';
3665
3666 return buf;
3667 }
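
/*
 * For instance, a staging (TAINT_CRAP) module still coming up renders
 * as "(C+)", and a proprietary module being unloaded as "(P-)".
 */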
3668
3669 #ifdef CONFIG_PROC_FS
3670 /* Called by the /proc file system to return a list of modules. */
3671 static void *m_start(struct seq_file *m, loff_t *pos)
3672 {
3673 mutex_lock(&module_mutex);
3674 return seq_list_start(&modules, *pos);
3675 }
3676
3677 static void *m_next(struct seq_file *m, void *p, loff_t *pos)
3678 {
3679 return seq_list_next(p, &modules, pos);
3680 }
3681
3682 static void m_stop(struct seq_file *m, void *p)
3683 {
3684 mutex_unlock(&module_mutex);
3685 }
3686
3687 static int m_show(struct seq_file *m, void *p)
3688 {
3689 struct module *mod = list_entry(p, struct module, list);
3690 char buf[8];
3691
3692 /* We always ignore unformed modules. */
3693 if (mod->state == MODULE_STATE_UNFORMED)
3694 return 0;
3695
3696 seq_printf(m, "%s %u",
3697 mod->name, mod->init_size + mod->core_size);
3698 print_unload_info(m, mod);
3699
3700 /* Informative for users. */
3701 seq_printf(m, " %s",
3702 mod->state == MODULE_STATE_GOING ? "Unloading" :
3703 mod->state == MODULE_STATE_COMING ? "Loading" :
3704 "Live");
3705 /* Used by oprofile and other similar tools. */
3706 seq_printf(m, " 0x%pK", mod->module_core);
3707
3708 /* Taints info */
3709 if (mod->taints)
3710 seq_printf(m, " %s", module_flags(mod, buf));
3711
3712 seq_puts(m, "\n");
3713 return 0;
3714 }
3715
3716 /* Format: modulename size refcount deps state address
3717
3718 Where refcount is a number or -, deps is a comma-separated list
3719 of dependent modules or -, and state is Live, Loading or Unloading.
3720 */
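
/*
 * A typical line (values illustrative):
 *
 *	snd_pcm 90112 2 snd_usb_audio,snd_pcm_oss, Live 0xffffffffa02a1000
 */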
3721 static const struct seq_operations modules_op = {
3722 .start = m_start,
3723 .next = m_next,
3724 .stop = m_stop,
3725 .show = m_show
3726 };
3727
3728 static int modules_open(struct inode *inode, struct file *file)
3729 {
3730 return seq_open(file, &modules_op);
3731 }
3732
3733 static const struct file_operations proc_modules_operations = {
3734 .open = modules_open,
3735 .read = seq_read,
3736 .llseek = seq_lseek,
3737 .release = seq_release,
3738 };
3739
3740 static int __init proc_modules_init(void)
3741 {
3742 proc_create("modules", 0, NULL, &proc_modules_operations);
3743 return 0;
3744 }
3745 module_init(proc_modules_init);
3746 #endif
3747
3748 /* Given an address, look for it in the module exception tables. */
3749 const struct exception_table_entry *search_module_extables(unsigned long addr)
3750 {
3751 const struct exception_table_entry *e = NULL;
3752 struct module *mod;
3753
3754 preempt_disable();
3755 list_for_each_entry_rcu(mod, &modules, list) {
3756 if (mod->state == MODULE_STATE_UNFORMED)
3757 continue;
3758 if (mod->num_exentries == 0)
3759 continue;
3760
3761 e = search_extable(mod->extable,
3762 mod->extable + mod->num_exentries - 1,
3763 addr);
3764 if (e)
3765 break;
3766 }
3767 preempt_enable();
3768
3769 /* If we found one, we are currently running inside it, so we
3770 cannot unload the module and no refcount is needed. */
3771 return e;
3772 }
3773
3774 /*
3775 * is_module_address - is this address inside a module?
3776 * @addr: the address to check.
3777 *
3778 * See is_module_text_address() if you simply want to see if the address
3779 * is code (not data).
3780 */
3781 bool is_module_address(unsigned long addr)
3782 {
3783 bool ret;
3784
3785 preempt_disable();
3786 ret = __module_address(addr) != NULL;
3787 preempt_enable();
3788
3789 return ret;
3790 }
3791
3792 /*
3793 * __module_address - get the module which contains an address.
3794 * @addr: the address.
3795 *
3796 * Must be called with preempt disabled or module mutex held so that
3797 * module doesn't get freed during this.
3798 */
3799 struct module *__module_address(unsigned long addr)
3800 {
3801 struct module *mod;
3802
3803 if (addr < module_addr_min || addr > module_addr_max)
3804 return NULL;
3805
3806 list_for_each_entry_rcu(mod, &modules, list) {
3807 if (mod->state == MODULE_STATE_UNFORMED)
3808 continue;
3809 if (within_module(addr, mod))
3810 return mod;
3811 }
3812 return NULL;
3813 }
3814 EXPORT_SYMBOL_GPL(__module_address);
3815
3816 /*
3817 * is_module_text_address - is this address inside module code?
3818 * @addr: the address to check.
3819 *
3820 * See is_module_address() if you simply want to see if the address is
3821 * anywhere in a module. See kernel_text_address() for testing if an
3822 * address corresponds to kernel or module code.
3823 */
3824 bool is_module_text_address(unsigned long addr)
3825 {
3826 bool ret;
3827
3828 preempt_disable();
3829 ret = __module_text_address(addr) != NULL;
3830 preempt_enable();
3831
3832 return ret;
3833 }
3834
3835 /*
3836 * __module_text_address - get the module whose code contains an address.
3837 * @addr: the address.
3838 *
3839 * Must be called with preempt disabled or module mutex held so that
3840 * module doesn't get freed during this.
3841 */
3842 struct module *__module_text_address(unsigned long addr)
3843 {
3844 struct module *mod = __module_address(addr);
3845 if (mod) {
3846 /* Make sure it's within the text section. */
3847 if (!within(addr, mod->module_init, mod->init_text_size)
3848 && !within(addr, mod->module_core, mod->core_text_size))
3849 mod = NULL;
3850 }
3851 return mod;
3852 }
3853 EXPORT_SYMBOL_GPL(__module_text_address);
3854
3855 /* Don't grab lock, we're oopsing. */
3856 void print_modules(void)
3857 {
3858 struct module *mod;
3859 char buf[8];
3860
3861 printk(KERN_DEFAULT "Modules linked in:");
3862 /* Most callers should already have preempt disabled, but make sure */
3863 preempt_disable();
3864 list_for_each_entry_rcu(mod, &modules, list) {
3865 if (mod->state == MODULE_STATE_UNFORMED)
3866 continue;
3867 pr_cont(" %s%s", mod->name, module_flags(mod, buf));
3868 }
3869 preempt_enable();
3870 if (last_unloaded_module[0])
3871 pr_cont(" [last unloaded: %s]", last_unloaded_module);
3872 pr_cont("\n");
3873 }
3874
3875 #ifdef CONFIG_MODVERSIONS
3876 /* Generate the signature for all relevant module structures here.
3877 * If these change, we don't want to try to parse the module. */
3878 void module_layout(struct module *mod,
3879 struct modversion_info *ver,
3880 struct kernel_param *kp,
3881 struct kernel_symbol *ks,
3882 struct tracepoint * const *tp)
3883 {
3884 }
3885 EXPORT_SYMBOL(module_layout);
3886 #endif