/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/ftrace_event.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>
#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
 */
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif
/*
 * Given BASE and SIZE this macro calculates the number of pages the
 * memory region occupies
 */
#define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ?		\
		(PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) -	\
			 PFN_DOWN((unsigned long)BASE) + 1)	\
		: (0UL))
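/*
 * Worked example (illustrative only, not from the original source): with
 * PAGE_SIZE = 4096, BASE = 0x1000 and SIZE = 5000 the region ends at 0x2387,
 * so PFN_DOWN(end) - PFN_DOWN(base) + 1 = 2 - 1 + 1 = 2 pages, while a SIZE
 * of 0 yields 0 pages.
 */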
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete and add uses RCU list operations). */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
#ifdef CONFIG_MODULES_TREE_LOOKUP

/*
 * Use a latched RB-tree for __module_address(); this allows us to use
 * RCU-sched lookups of the address from any context.
 *
 * Because modules have two address ranges: init and core, we need two
 * latch_tree_node entries. Therefore we need the back-pointer from
 * mod_tree_node.
 *
 * Because init ranges are short lived we mark them unlikely and have placed
 * them outside the critical cacheline in struct module.
 *
 * This is conditional on PERF_EVENTS || TRACING because those can really hit
 * __module_address() hard by doing a lot of stack unwinding; potentially from
 * NMI context.
 */
static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
{
	struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
	struct module *mod = mtn->mod;

	if (unlikely(mtn == &mod->mtn_init))
		return (unsigned long)mod->module_init;

	return (unsigned long)mod->module_core;
}

static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
{
	struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
	struct module *mod = mtn->mod;

	if (unlikely(mtn == &mod->mtn_init))
		return (unsigned long)mod->init_size;

	return (unsigned long)mod->core_size;
}
static __always_inline bool
mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
	return __mod_tree_val(a) < __mod_tree_val(b);
}

static __always_inline int
mod_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long start, end;

	start = __mod_tree_val(n);
	if (val < start)
		return -1;

	end = start + __mod_tree_size(n);
	if (val >= end)
		return 1;

	return 0;
}
static const struct latch_tree_ops mod_tree_ops = {
	.less = mod_tree_less,
	.comp = mod_tree_comp,
};

static struct mod_tree_root {
	struct latch_tree_root root;
	unsigned long addr_min;
	unsigned long addr_max;
} mod_tree __cacheline_aligned = {
	.addr_min = -1UL,
};

#define module_addr_min mod_tree.addr_min
#define module_addr_max mod_tree.addr_max
static noinline void __mod_tree_insert(struct mod_tree_node *node)
{
	latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
}

static void __mod_tree_remove(struct mod_tree_node *node)
{
	latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
}

/*
 * These modifications: insert, remove_init and remove; are serialized by the
 * module_mutex.
 */
static void mod_tree_insert(struct module *mod)
{
	mod->mtn_core.mod = mod;
	mod->mtn_init.mod = mod;

	__mod_tree_insert(&mod->mtn_core);
	if (mod->init_size)
		__mod_tree_insert(&mod->mtn_init);
}

static void mod_tree_remove_init(struct module *mod)
{
	if (mod->init_size)
		__mod_tree_remove(&mod->mtn_init);
}

static void mod_tree_remove(struct module *mod)
{
	__mod_tree_remove(&mod->mtn_core);
	mod_tree_remove_init(mod);
}
static struct module *mod_find(unsigned long addr)
{
	struct latch_tree_node *ltn;

	ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
	if (!ltn)
		return NULL;

	return container_of(ltn, struct mod_tree_node, node)->mod;
}

#else /* MODULES_TREE_LOOKUP */

static unsigned long module_addr_min = -1UL, module_addr_max = 0;

static void mod_tree_insert(struct module *mod) { }
static void mod_tree_remove_init(struct module *mod) { }
static void mod_tree_remove(struct module *mod) { }

static struct module *mod_find(unsigned long addr)
{
	struct module *mod;

	list_for_each_entry_rcu(mod, &modules, list) {
		if (within_module(addr, mod))
			return mod;
	}

	return NULL;
}

#endif /* MODULES_TREE_LOOKUP */
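/*
 * Lookup sketch (illustrative, not part of the original file): callers in the
 * style of __module_address() first filter against module_addr_min/_max and
 * then query the structure above from an RCU-sched read-side section, e.g.:
 *
 *	preempt_disable();
 *	if (addr >= module_addr_min && addr <= module_addr_max)
 *		mod = mod_find(addr);
 *	preempt_enable();
 */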
/*
 * Bounds of module text, for speeding up __module_address.
 * Protected by module_mutex.
 */
static void __mod_update_bounds(void *base, unsigned int size)
{
	unsigned long min = (unsigned long)base;
	unsigned long max = min + size;

	if (min < module_addr_min)
		module_addr_min = min;
	if (max > module_addr_max)
		module_addr_max = max;
}

static void mod_update_bounds(struct module *mod)
{
	__mod_update_bounds(mod->module_core, mod->core_size);
	if (mod->init_size)
		__mod_update_bounds(mod->module_init, mod->init_size);
}

#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */
static void module_assert_mutex(void)
{
	lockdep_assert_held(&module_mutex);
}

static void module_assert_mutex_or_preempt(void)
{
#ifdef CONFIG_LOCKDEP
	if (unlikely(!debug_locks))
		return;

	WARN_ON(!rcu_read_lock_sched_held() &&
		!lockdep_is_held(&module_mutex));
#endif
}
#ifdef CONFIG_MODULE_SIG
#ifdef CONFIG_MODULE_SIG_FORCE
static bool sig_enforce = true;
#else
static bool sig_enforce = false;

static int param_set_bool_enable_only(const char *val,
				      const struct kernel_param *kp)
{
	int err;
	bool new_value;
	bool orig_value = *(bool *)kp->arg;
	struct kernel_param dummy_kp = *kp;

	dummy_kp.arg = &new_value;

	err = param_set_bool(val, &dummy_kp);
	if (err)
		return err;

	/* Don't let them unset it once it's set! */
	if (!new_value && orig_value)
		return -EROFS;

	err = param_set_bool(val, kp);
	return err;
}

static const struct kernel_param_ops param_ops_bool_enable_only = {
	.flags = KERNEL_PARAM_OPS_FL_NOARG,
	.set = param_set_bool_enable_only,
	.get = param_get_bool,
};
#define param_check_bool_enable_only param_check_bool

module_param(sig_enforce, bool_enable_only, 0644);
#endif /* !CONFIG_MODULE_SIG_FORCE */
#endif /* CONFIG_MODULE_SIG */
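/*
 * Behaviour sketch (illustrative, not from the original file): with the
 * bool_enable_only ops above, booting with "module.sig_enforce=1" or writing
 * "1" to the sig_enforce parameter under /sys/module/module/parameters/ turns
 * enforcement on, while a later write of "0" fails with -EROFS because
 * param_set_bool_enable_only() refuses to clear a value that is already set.
 */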
/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);
struct load_info {
	Elf_Ehdr *hdr;
	unsigned long len;
	Elf_Shdr *sechdrs;
	char *secstrings, *strtab;
	unsigned long symoffs, stroffs;
	struct _ddebug *debug;
	unsigned int num_debug;
	bool sig_ok;
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;
};
/* We require a truly strong try_module_get(): 0 means failure due to
   ongoing or failed initialization etc. */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{
	add_taint(flag, lockdep_ok);
	mod->taints |= (1U << flag);
}
/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);
	do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);
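/*
 * Usage sketch (illustrative, not part of the original file): a module's
 * kernel thread that pinned its own module can terminate without racing
 * module unload by calling
 *
 *	module_put_and_exit(0);
 *
 * the THIS_MODULE wrapper around __module_put_and_exit() used by nfsd and
 * lockd.
 */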
/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}
/* Find a module section, or NULL. Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab[];
extern const unsigned long __start___kcrctab_gpl[];
extern const unsigned long __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      void *data),
				   void *data)
{
	unsigned int j;

	for (j = 0; j < arrsize; j++) {
		if (fn(&arr[j], owner, data))
			return true;
	}

	return false;
}
/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
				    struct module *owner,
				    void *data),
			 void *data)
{
	struct module *mod;
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif
	};

	module_assert_mutex_or_preempt();

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	list_for_each_entry_rcu(mod, &modules, list) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif
		};

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(each_symbol_section);
struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};
static bool check_symbol(const struct symsearch *syms,
				 struct module *owner,
				 unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			pr_warn("Symbol %s is being used by a non-GPL module, "
				"which will not be allowed in the future\n",
				fsa->name);
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		pr_warn("Symbol %s is marked as UNUSED, however this module is "
			"using it.\n", fsa->name);
		pr_warn("This symbol will go away in the future.\n");
		pr_warn("Please evaluate if this is the right api to use and "
			"if it really is, submit a report to the linux kernel "
			"mailing list together with submitting your code for "
			"inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}
static int cmp_name(const void *va, const void *vb)
{
	const char *a;
	const struct kernel_symbol *b;
	a = va; b = vb;
	return strcmp(a, b->name);
}
static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   void *data)
{
	struct find_symbol_arg *fsa = data;
	struct kernel_symbol *sym;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
			sizeof(struct kernel_symbol), cmp_name);

	if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
		return true;

	return false;
}
/* Find a symbol and return it, along with (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const unsigned long **crc,
					bool gplok,
					bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol_section(find_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	pr_debug("Failed to find symbol %s\n", name);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);
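/*
 * Usage sketch (illustrative, not from the original file): a caller that is
 * not holding module_mutex disables preemption around the lookup, in the same
 * way __symbol_put() and __symbol_get() do later in this file:
 *
 *	preempt_disable();
 *	sym = find_symbol("some_export", &owner, NULL, true, false);
 *	preempt_enable();
 */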
/* Search for module by name: must hold module_mutex. */
static struct module *find_module_all(const char *name, size_t len,
				      bool even_unformed)
{
	struct module *mod;

	module_assert_mutex();

	list_for_each_entry(mod, &modules, list) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
			return mod;
	}
	return NULL;
}

struct module *find_module(const char *name)
{
	return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);
#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)
		return 0;

	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);

			if ((void *)addr >= start &&
			    (void *)addr < start + mod->percpu_size) {
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

#endif /* CONFIG_SMP */
#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,  \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)              \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                 \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                   \
	.attr = { .name = __stringify(field), .mode = 0444 },        \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
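/*
 * Expansion sketch (illustrative): MODINFO_ATTR(version) generates
 * setup_modinfo_version(), show_modinfo_version(), modinfo_version_exists(),
 * free_modinfo_version() and the modinfo_version attribute, which backs the
 * read-only file /sys/module/<name>/version when the module declared a
 * MODULE_VERSION() string.
 */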
static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count by kmodule loader. */
#define MODULE_REF_BASE	1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	/*
	 * Initialize reference counter to MODULE_REF_BASE.
	 * refcnt == 0 means module is going.
	 */
	atomic_set(&mod->refcnt, MODULE_REF_BASE);

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	atomic_inc(&mod->refcnt);

	return 0;
}
/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a) {
			pr_debug("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}
/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		pr_warn("%s: out of memory loading\n", a->name);
		return -ENOMEM;
	}

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}
/* Module a uses b: caller needs module_mutex() */
int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ref_module);
/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);
}
#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */
/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
	int ret;

	/* Try to decrement refcnt which we set at loading */
	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
	BUG_ON(ret < 0);
	if (ret)
		/* Someone can put this right now, recover with checking */
		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

	return ret;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	/* If it's not unused, quit unless we're forcing. */
	if (try_release_module_ref(mod) != 0) {
		*forced = try_force_unload(flags);
		if (!(*forced))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	mod->state = MODULE_STATE_GOING;

	return 0;
}
/**
 * module_refcount - return the refcount or -1 if unloading
 *
 * @mod:	the module we're checking
 *
 * Returns:
 *	-1 if the module is in the process of unloading
 *	otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);
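/*
 * Example (illustrative): a freshly loaded module has refcnt == MODULE_REF_BASE
 * (plus one while init runs), so module_refcount() reports 0 once
 * initialization has finished and no users hold a reference;
 * try_release_module_ref() subtracting MODULE_REF_BASE is what drives the
 * count to 0 during unload.
 */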
/* This exists whether we can unload or not */
static void free_module(struct module *mod);

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	struct module_use *use;
	int printed_something = 0;

	seq_printf(m, " %i ", module_refcount(mod));

	/*
	 * Always include a trailing , so userspace can differentiate
	 * between this and the old multi-field proc format.
	 */
	list_for_each_entry(use, &mod->source_list, source_list) {
		printed_something = 1;
		seq_printf(m, "%s,", use->source->name);
	}

	if (mod->init != NULL && mod->exit == NULL) {
		printed_something = 1;
		seq_puts(m, "[permanent],");
	}

	if (!printed_something)
		seq_puts(m, "-");
}

void __symbol_put(const char *symbol)
{
	struct module *owner;

	preempt_disable();
	if (!find_symbol(symbol, &owner, NULL, true, false))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);
/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/* module_text_address is safe here: we're supposed to have reference
	 * to module from symbol_get, so it can't go away. */
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);
static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}

static struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);
void __module_get(struct module *module)
{
	if (module) {
		preempt_disable();
		atomic_inc(&module->refcnt);
		trace_module_get(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		preempt_disable();
		/* Note: here, we can fail to get a reference */
		if (likely(module_is_live(module) &&
			   atomic_inc_not_zero(&module->refcnt) != 0))
			trace_module_get(module, _RET_IP_);
		else
			ret = false;

		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);
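/*
 * Usage sketch (illustrative, not from the original file): code that wants to
 * call into another module only while it is alive typically does
 *
 *	if (!try_module_get(owner))
 *		return -ENODEV;
 *	...use the module...
 *	module_put(owner);
 *
 * try_module_get() fails once the module is going away, which is why it is
 * preferred over __module_get() outside of module init paths.
 */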
void module_put(struct module *module)
{
	int ret;

	if (module) {
		preempt_disable();
		ret = atomic_dec_if_positive(&module->refcnt);
		WARN_ON(ret < 0);	/* Failed to put refcount */
		trace_module_put(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);
#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_puts(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(ref_module);

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */
static size_t module_flags_taint(struct module *mod, char *buf)
{
	size_t l = 0;

	if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
		buf[l++] = 'P';
	if (mod->taints & (1 << TAINT_OOT_MODULE))
		buf[l++] = 'O';
	if (mod->taints & (1 << TAINT_FORCED_MODULE))
		buf[l++] = 'F';
	if (mod->taints & (1 << TAINT_CRAP))
		buf[l++] = 'C';
	if (mod->taints & (1 << TAINT_UNSIGNED_MODULE))
		buf[l++] = 'E';
	/*
	 * TAINT_FORCED_RMMOD: could be added.
	 * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
	 * apply to modules.
	 */
	return l;
}
static ssize_t show_initstate(struct module_attribute *mattr,
			      struct module_kobject *mk, char *buffer)
{
	const char *state = "unknown";

	switch (mk->mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	default:
		BUG();
	}
	return sprintf(buffer, "%s\n", state);
}

static struct module_attribute modinfo_initstate =
	__ATTR(initstate, 0444, show_initstate, NULL);
static ssize_t store_uevent(struct module_attribute *mattr,
			    struct module_kobject *mk,
			    const char *buffer, size_t count)
{
	enum kobject_action action;

	if (kobject_action_type(buffer, count, &action) == 0)
		kobject_uevent(&mk->kobj, action);
	return count;
}

struct module_attribute module_uevent =
	__ATTR(uevent, 0200, NULL, store_uevent);
static ssize_t show_coresize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->core_size);
}

static struct module_attribute modinfo_coresize =
	__ATTR(coresize, 0444, show_coresize, NULL);

static ssize_t show_initsize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->init_size);
}

static struct module_attribute modinfo_initsize =
	__ATTR(initsize, 0444, show_initsize, NULL);
static ssize_t show_taint(struct module_attribute *mattr,
			  struct module_kobject *mk, char *buffer)
{
	size_t l;

	l = module_flags_taint(mk->mod, buffer);
	buffer[l++] = '\n';
	return l;
}

static struct module_attribute modinfo_taint =
	__ATTR(taint, 0444, show_taint, NULL);
static struct module_attribute *modinfo_attrs[] = {
	&module_uevent,
	&modinfo_version,
	&modinfo_srcversion,
	&modinfo_initstate,
	&modinfo_coresize,
	&modinfo_initsize,
	&modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
	&modinfo_refcnt,
#endif
	NULL,
};
static const char vermagic[] = VERMAGIC_STRING;

static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
	return 0;
#else
	return -ENOEXEC;
#endif
}
#ifdef CONFIG_MODVERSIONS
/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
static unsigned long maybe_relocated(unsigned long crc,
				     const struct module *crc_owner)
{
#ifdef ARCH_RELOCATES_KCRCTAB
	if (crc_owner == NULL)
		return crc - (unsigned long)reloc_start;
#endif
	return crc;
}
static int check_version(Elf_Shdr *sechdrs,
			 unsigned int versindex,
			 const char *symname,
			 struct module *mod,
			 const unsigned long *crc,
			 const struct module *crc_owner)
{
	unsigned int i, num_versions;
	struct modversion_info *versions;

	/* Exporting module didn't supply crcs?  OK, we're already tainted. */
	if (!crc)
		return 1;

	/* No versions at all?  modprobe --force does this. */
	if (versindex == 0)
		return try_to_force_load(mod, symname) == 0;

	versions = (void *) sechdrs[versindex].sh_addr;
	num_versions = sechdrs[versindex].sh_size
		/ sizeof(struct modversion_info);

	for (i = 0; i < num_versions; i++) {
		if (strcmp(versions[i].name, symname) != 0)
			continue;

		if (versions[i].crc == maybe_relocated(*crc, crc_owner))
			return 1;
		pr_debug("Found checksum %lX vs module %lX\n",
			 maybe_relocated(*crc, crc_owner), versions[i].crc);
		goto bad_version;
	}

	pr_warn("%s: no symbol version for %s\n", mod->name, symname);
	return 0;

bad_version:
	pr_warn("%s: disagrees about version of symbol %s\n",
		mod->name, symname);
	return 0;
}
static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	const unsigned long *crc;

	/*
	 * Since this should be found in kernel (which can't be removed), no
	 * locking is necessary -- use preempt_disable() to placate lockdep.
	 */
	preempt_disable();
	if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
			 &crc, true, false)) {
		preempt_enable();
		BUG();
	}
	preempt_enable();
	return check_version(sechdrs, versindex,
			     VMLINUX_SYMBOL_STR(module_layout), mod, crc,
			     NULL);
}
1378 static inline int same_magic(const char *amagic
, const char *bmagic
,
1382 amagic
+= strcspn(amagic
, " ");
1383 bmagic
+= strcspn(bmagic
, " ");
1385 return strcmp(amagic
, bmagic
) == 0;
1388 static inline int check_version(Elf_Shdr
*sechdrs
,
1389 unsigned int versindex
,
1390 const char *symname
,
1392 const unsigned long *crc
,
1393 const struct module
*crc_owner
)
1398 static inline int check_modstruct_version(Elf_Shdr
*sechdrs
,
1399 unsigned int versindex
,
1405 static inline int same_magic(const char *amagic
, const char *bmagic
,
1408 return strcmp(amagic
, bmagic
) == 0;
1410 #endif /* CONFIG_MODVERSIONS */
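/*
 * Example (illustrative version strings): with CRCs available, same_magic()
 * skips everything up to the first space, so "4.2.0 SMP mod_unload " and
 * "4.2.3 SMP mod_unload " compare equal; without CRCs the full strings,
 * including the kernel release, must match exactly.
 */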
/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
						  const char *name,
						  char ownername[])
{
	struct module *owner;
	const struct kernel_symbol *sym;
	const unsigned long *crc;
	int err;

	/*
	 * The module_mutex should not be a heavily contended lock;
	 * if we get the occasional sleep here, we'll go an extra iteration
	 * in the wait_event_interruptible(), which is harmless.
	 */
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	sym = find_symbol(name, &owner, &crc,
			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
	if (!sym)
		goto unlock;

	if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
			   owner)) {
		sym = ERR_PTR(-EINVAL);
		goto getname;
	}

	err = ref_module(mod, owner);
	if (err) {
		sym = ERR_PTR(err);
		goto getname;
	}

getname:
	/* We must make copy under the lock if we failed to get ref. */
	strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
unlock:
	mutex_unlock(&module_mutex);
	return sym;
}
static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
		    const char *name)
{
	const struct kernel_symbol *ksym;
	char owner[MODULE_NAME_LEN];

	if (wait_event_interruptible_timeout(module_wq,
			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
			|| PTR_ERR(ksym) != -EBUSY,
					     30 * HZ) <= 0) {
		pr_warn("%s: gave up waiting for init of module %s.\n",
			mod->name, owner);
	}
	return ksym;
}
/*
 * /sys/module/foo/sections stuff
 * J. Corbet <corbet@lwn.net>
 */
#ifdef CONFIG_SYSFS

#ifdef CONFIG_KALLSYMS
static inline bool sect_empty(const Elf_Shdr *sect)
{
	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}

struct module_sect_attr {
	struct module_attribute mattr;
	char *name;
	unsigned long address;
};

struct module_sect_attrs {
	struct attribute_group grp;
	unsigned int nsections;
	struct module_sect_attr attrs[0];
};
static ssize_t module_sect_show(struct module_attribute *mattr,
				struct module_kobject *mk, char *buf)
{
	struct module_sect_attr *sattr =
		container_of(mattr, struct module_sect_attr, mattr);
	return sprintf(buf, "0x%pK\n", (void *)sattr->address);
}

static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
{
	unsigned int section;

	for (section = 0; section < sect_attrs->nsections; section++)
		kfree(sect_attrs->attrs[section].name);
	kfree(sect_attrs);
}
static void add_sect_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int nloaded = 0, i, size[2];
	struct module_sect_attrs *sect_attrs;
	struct module_sect_attr *sattr;
	struct attribute **gattr;

	/* Count loaded sections and allocate structures */
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]))
			nloaded++;
	size[0] = ALIGN(sizeof(*sect_attrs)
			+ nloaded * sizeof(sect_attrs->attrs[0]),
			sizeof(sect_attrs->grp.attrs[0]));
	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
	if (sect_attrs == NULL)
		return;

	/* Setup section attributes. */
	sect_attrs->grp.name = "sections";
	sect_attrs->grp.attrs = (void *)sect_attrs + size[0];

	sect_attrs->nsections = 0;
	sattr = &sect_attrs->attrs[0];
	gattr = &sect_attrs->grp.attrs[0];
	for (i = 0; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *sec = &info->sechdrs[i];
		if (sect_empty(sec))
			continue;
		sattr->address = sec->sh_addr;
		sattr->name = kstrdup(info->secstrings + sec->sh_name,
					GFP_KERNEL);
		if (sattr->name == NULL)
			goto out;
		sect_attrs->nsections++;
		sysfs_attr_init(&sattr->mattr.attr);
		sattr->mattr.show = module_sect_show;
		sattr->mattr.store = NULL;
		sattr->mattr.attr.name = sattr->name;
		sattr->mattr.attr.mode = S_IRUGO;
		*(gattr++) = &(sattr++)->mattr.attr;
	}
	*gattr = NULL;

	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
		goto out;

	mod->sect_attrs = sect_attrs;
	return;
  out:
	free_sect_attrs(sect_attrs);
}
static void remove_sect_attrs(struct module *mod)
{
	if (mod->sect_attrs) {
		sysfs_remove_group(&mod->mkobj.kobj,
				   &mod->sect_attrs->grp);
		/* We are positive that no one is using any sect attrs
		 * at this point.  Deallocate immediately. */
		free_sect_attrs(mod->sect_attrs);
		mod->sect_attrs = NULL;
	}
}

/*
 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
 */

struct module_notes_attrs {
	struct kobject *dir;
	unsigned int notes;
	struct bin_attribute attrs[0];
};
static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t pos, size_t count)
{
	/*
	 * The caller checked the pos and count against our size.
	 */
	memcpy(buf, bin_attr->private + pos, count);
	return count;
}

static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
			     unsigned int i)
{
	if (notes_attrs->dir) {
		while (i-- > 0)
			sysfs_remove_bin_file(notes_attrs->dir,
					      &notes_attrs->attrs[i]);
		kobject_put(notes_attrs->dir);
	}
	kfree(notes_attrs);
}
static void add_notes_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int notes, loaded, i;
	struct module_notes_attrs *notes_attrs;
	struct bin_attribute *nattr;

	/* failed to create section attributes, so can't create notes */
	if (!mod->sect_attrs)
		return;

	/* Count notes sections and allocate structures.  */
	notes = 0;
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]) &&
		    (info->sechdrs[i].sh_type == SHT_NOTE))
			++notes;

	if (notes == 0)
		return;

	notes_attrs = kzalloc(sizeof(*notes_attrs)
			      + notes * sizeof(notes_attrs->attrs[0]),
			      GFP_KERNEL);
	if (notes_attrs == NULL)
		return;

	notes_attrs->notes = notes;
	nattr = &notes_attrs->attrs[0];
	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
		if (sect_empty(&info->sechdrs[i]))
			continue;
		if (info->sechdrs[i].sh_type == SHT_NOTE) {
			sysfs_bin_attr_init(nattr);
			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
			nattr->attr.mode = S_IRUGO;
			nattr->size = info->sechdrs[i].sh_size;
			nattr->private = (void *) info->sechdrs[i].sh_addr;
			nattr->read = module_notes_read;
			++nattr;
		}
		++loaded;
	}

	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
	if (!notes_attrs->dir)
		goto out;

	for (i = 0; i < notes; ++i)
		if (sysfs_create_bin_file(notes_attrs->dir,
					  &notes_attrs->attrs[i]))
			goto out;

	mod->notes_attrs = notes_attrs;
	return;

  out:
	free_notes_attrs(notes_attrs, i);
}
static void remove_notes_attrs(struct module *mod)
{
	if (mod->notes_attrs)
		free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
}

#else

static inline void add_sect_attrs(struct module *mod,
				  const struct load_info *info)
{
}

static inline void remove_sect_attrs(struct module *mod)
{
}

static inline void add_notes_attrs(struct module *mod,
				   const struct load_info *info)
{
}

static inline void remove_notes_attrs(struct module *mod)
{
}
#endif /* CONFIG_KALLSYMS */
static void add_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;
	int nowarn;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list) {
		nowarn = sysfs_create_link(use->target->holders_dir,
					   &mod->mkobj.kobj, mod->name);
	}
	mutex_unlock(&module_mutex);
#endif
}

static void del_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list)
		sysfs_remove_link(use->target->holders_dir, mod->name);
	mutex_unlock(&module_mutex);
#endif
}
static int module_add_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	struct module_attribute *temp_attr;
	int error = 0;
	int i;

	mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
					(ARRAY_SIZE(modinfo_attrs) + 1)),
					GFP_KERNEL);
	if (!mod->modinfo_attrs)
		return -ENOMEM;

	temp_attr = mod->modinfo_attrs;
	for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
		if (!attr->test ||
		    (attr->test && attr->test(mod))) {
			memcpy(temp_attr, attr, sizeof(*temp_attr));
			sysfs_attr_init(&temp_attr->attr);
			error = sysfs_create_file(&mod->mkobj.kobj,
						  &temp_attr->attr);
			++temp_attr;
		}
	}
	return error;
}
static void module_remove_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
		/* pick a field to test for end of list */
		if (!attr->attr.name)
			break;
		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
		if (attr->free)
			attr->free(mod);
	}
	kfree(mod->modinfo_attrs);
}

static void mod_kobject_put(struct module *mod)
{
	DECLARE_COMPLETION_ONSTACK(c);
	mod->mkobj.kobj_completion = &c;
	kobject_put(&mod->mkobj.kobj);
	wait_for_completion(&c);
}
static int mod_sysfs_init(struct module *mod)
{
	int err;
	struct kobject *kobj;

	if (!module_sysfs_initialized) {
		pr_err("%s: module sysfs not initialized\n", mod->name);
		err = -EINVAL;
		goto out;
	}

	kobj = kset_find_obj(module_kset, mod->name);
	if (kobj) {
		pr_err("%s: module is already loaded\n", mod->name);
		kobject_put(kobj);
		err = -EINVAL;
		goto out;
	}

	mod->mkobj.mod = mod;

	memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
	mod->mkobj.kobj.kset = module_kset;
	err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
				   "%s", mod->name);
	if (err)
		mod_kobject_put(mod);

	/* delay uevent until full sysfs population */
out:
	return err;
}
static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	int err;

	err = mod_sysfs_init(mod);
	if (err)
		goto out;

	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
	if (!mod->holders_dir) {
		err = -ENOMEM;
		goto out_unreg;
	}

	err = module_param_sysfs_setup(mod, kparam, num_params);
	if (err)
		goto out_unreg_holders;

	err = module_add_modinfo_attrs(mod);
	if (err)
		goto out_unreg_param;

	add_usage_links(mod);
	add_sect_attrs(mod, info);
	add_notes_attrs(mod, info);

	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
	return 0;

out_unreg_param:
	module_param_sysfs_remove(mod);
out_unreg_holders:
	kobject_put(mod->holders_dir);
out_unreg:
	mod_kobject_put(mod);
out:
	return err;
}

static void mod_sysfs_fini(struct module *mod)
{
	remove_notes_attrs(mod);
	remove_sect_attrs(mod);
	mod_kobject_put(mod);
}
#else /* !CONFIG_SYSFS */

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	return 0;
}

static void mod_sysfs_fini(struct module *mod)
{
}

static void module_remove_modinfo_attrs(struct module *mod)
{
}

static void del_usage_links(struct module *mod)
{
}

#endif /* CONFIG_SYSFS */

static void mod_sysfs_teardown(struct module *mod)
{
	del_usage_links(mod);
	module_remove_modinfo_attrs(mod);
	module_param_sysfs_remove(mod);
	kobject_put(mod->mkobj.drivers_dir);
	kobject_put(mod->holders_dir);
	mod_sysfs_fini(mod);
}
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
/*
 * LKM RO/NX protection: protect module's text/ro-data
 * from modification and any data from execution.
 */
void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages))
{
	unsigned long begin_pfn = PFN_DOWN((unsigned long)start);
	unsigned long end_pfn = PFN_DOWN((unsigned long)end);

	if (end_pfn > begin_pfn)
		set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
}
static void set_section_ro_nx(void *base,
			unsigned long text_size,
			unsigned long ro_size,
			unsigned long total_size)
{
	/* begin and end PFNs of the current subsection */
	unsigned long begin_pfn;
	unsigned long end_pfn;

	/*
	 * Set RO for module text and RO-data:
	 * - Always protect first page.
	 * - Do not protect last partial page.
	 */
	if (ro_size > 0)
		set_page_attributes(base, base + ro_size, set_memory_ro);

	/*
	 * Set NX permissions for module data:
	 * - Do not protect first partial page.
	 * - Always protect last page.
	 */
	if (total_size > text_size) {
		begin_pfn = PFN_UP((unsigned long)base + text_size);
		end_pfn = PFN_UP((unsigned long)base + total_size);
		if (end_pfn > begin_pfn)
			set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
	}
}

static void unset_module_core_ro_nx(struct module *mod)
{
	set_page_attributes(mod->module_core + mod->core_text_size,
		mod->module_core + mod->core_size,
		set_memory_x);
	set_page_attributes(mod->module_core,
		mod->module_core + mod->core_ro_size,
		set_memory_rw);
}

static void unset_module_init_ro_nx(struct module *mod)
{
	set_page_attributes(mod->module_init + mod->init_text_size,
		mod->module_init + mod->init_size,
		set_memory_x);
	set_page_attributes(mod->module_init,
		mod->module_init + mod->init_ro_size,
		set_memory_rw);
}
/* Iterate through all modules and set each module's text as RW */
void set_all_modules_text_rw(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if ((mod->module_core) && (mod->core_text_size)) {
			set_page_attributes(mod->module_core,
						mod->module_core + mod->core_text_size,
						set_memory_rw);
		}
		if ((mod->module_init) && (mod->init_text_size)) {
			set_page_attributes(mod->module_init,
						mod->module_init + mod->init_text_size,
						set_memory_rw);
		}
	}
	mutex_unlock(&module_mutex);
}

/* Iterate through all modules and set each module's text as RO */
void set_all_modules_text_ro(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if ((mod->module_core) && (mod->core_text_size)) {
			set_page_attributes(mod->module_core,
						mod->module_core + mod->core_text_size,
						set_memory_ro);
		}
		if ((mod->module_init) && (mod->init_text_size)) {
			set_page_attributes(mod->module_init,
						mod->module_init + mod->init_text_size,
						set_memory_ro);
		}
	}
	mutex_unlock(&module_mutex);
}
#else
static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
static void unset_module_core_ro_nx(struct module *mod) { }
static void unset_module_init_ro_nx(struct module *mod) { }
#endif

void __weak module_memfree(void *module_region)
{
	vfree(module_region);
}

void __weak module_arch_cleanup(struct module *mod)
{
}

void __weak module_arch_freeing_init(struct module *mod)
{
}
/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
	trace_module_free(mod);

	mod_sysfs_teardown(mod);

	/* We leave it in list to prevent duplicate loads, but make sure
	 * that noone uses it while it's being deconstructed. */
	mutex_lock(&module_mutex);
	mod->state = MODULE_STATE_UNFORMED;
	mutex_unlock(&module_mutex);

	/* Remove dynamic debug info */
	ddebug_remove_module(mod->name);

	/* Arch-specific cleanup. */
	module_arch_cleanup(mod);

	/* Module unload stuff */
	module_unload_free(mod);

	/* Free any allocated parameters. */
	destroy_params(mod->kp, mod->num_kp);

	/* Now we can delete it from the lists */
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	mod_tree_remove(mod);
	/* Remove this module from bug list, this uses list_del_rcu */
	module_bug_cleanup(mod);
	/* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
	synchronize_sched();
	mutex_unlock(&module_mutex);

	/* This may be NULL, but that's OK */
	unset_module_init_ro_nx(mod);
	module_arch_freeing_init(mod);
	module_memfree(mod->module_init);
	kfree(mod->args);
	percpu_modfree(mod);

	/* Free lock-classes; relies on the preceding sync_rcu(). */
	lockdep_free_key_range(mod->module_core, mod->core_size);

	/* Finally, free the core (containing the module structure) */
	unset_module_core_ro_nx(mod);
	module_memfree(mod->module_core);

#ifdef CONFIG_MPU
	update_protections(current->mm);
#endif
}
void *__symbol_get(const char *symbol)
{
	struct module *owner;
	const struct kernel_symbol *sym;

	preempt_disable();
	sym = find_symbol(symbol, &owner, NULL, true, true);
	if (sym && strong_try_module_get(owner))
		sym = NULL;
	preempt_enable();

	return sym ? (void *)sym->value : NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);
/*
 * Ensure that an exported symbol [global namespace] does not already exist
 * in the kernel or in some other module's exported symbol table.
 *
 * You must hold the module_mutex.
 */
static int verify_export_symbols(struct module *mod)
{
	unsigned int i;
	struct module *owner;
	const struct kernel_symbol *s;
	struct {
		const struct kernel_symbol *sym;
		unsigned int num;
	} arr[] = {
		{ mod->syms, mod->num_syms },
		{ mod->gpl_syms, mod->num_gpl_syms },
		{ mod->gpl_future_syms, mod->num_gpl_future_syms },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ mod->unused_syms, mod->num_unused_syms },
		{ mod->unused_gpl_syms, mod->num_unused_gpl_syms },
#endif
	};

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
			if (find_symbol(s->name, &owner, NULL, true, false)) {
				pr_err("%s: exports duplicate symbol %s"
				       " (owned by %s)\n",
				       mod->name, s->name, module_name(owner));
				return -ENOEXEC;
			}
		}
	}
	return 0;
}
/* Change all symbols so that st_value encodes the pointer directly. */
static int simplify_symbols(struct module *mod, const struct load_info *info)
{
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
	Elf_Sym *sym = (void *)symsec->sh_addr;
	unsigned long secbase;
	unsigned int i;
	int ret = 0;
	const struct kernel_symbol *ksym;

	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
		const char *name = info->strtab + sym[i].st_name;

		switch (sym[i].st_shndx) {
		case SHN_COMMON:
			/* Ignore common symbols */
			if (!strncmp(name, "__gnu_lto", 9))
				break;

			/* We compiled with -fno-common.  These are not
			   supposed to happen.  */
			pr_debug("Common symbol: %s\n", name);
			pr_warn("%s: please compile with -fno-common\n",
				mod->name);
			ret = -ENOEXEC;
			break;

		case SHN_ABS:
			/* Don't need to do anything */
			pr_debug("Absolute symbol: 0x%08lx\n",
				 (long)sym[i].st_value);
			break;

		case SHN_UNDEF:
			ksym = resolve_symbol_wait(mod, info, name);
			/* Ok if resolved.  */
			if (ksym && !IS_ERR(ksym)) {
				sym[i].st_value = ksym->value;
				break;
			}

			/* Ok if weak.  */
			if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
				break;

			pr_warn("%s: Unknown symbol %s (err %li)\n",
				mod->name, name, PTR_ERR(ksym));
			ret = PTR_ERR(ksym) ?: -ENOENT;
			break;

		default:
			/* Divert to percpu allocation if a percpu var. */
			if (sym[i].st_shndx == info->index.pcpu)
				secbase = (unsigned long)mod_percpu(mod);
			else
				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
			sym[i].st_value += secbase;
			break;
		}
	}

	return ret;
}
static int apply_relocations(struct module *mod, const struct load_info *info)
{
	unsigned int i;
	int err = 0;

	/* Now do relocations. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		unsigned int infosec = info->sechdrs[i].sh_info;

		/* Not a valid relocation section? */
		if (infosec >= info->hdr->e_shnum)
			continue;

		/* Don't bother with non-allocated sections */
		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
			continue;

		if (info->sechdrs[i].sh_type == SHT_REL)
			err = apply_relocate(info->sechdrs, info->strtab,
					     info->index.sym, i, mod);
		else if (info->sechdrs[i].sh_type == SHT_RELA)
			err = apply_relocate_add(info->sechdrs, info->strtab,
						 info->index.sym, i, mod);
		if (err < 0)
			break;
	}
	return err;
}
/* Additional bytes needed by arch in front of individual sections */
unsigned int __weak arch_mod_section_prepend(struct module *mod,
					     unsigned int section)
{
	/* default implementation just returns zero */
	return 0;
}

/* Update size with this section: return offset. */
static long get_offset(struct module *mod, unsigned int *size,
		       Elf_Shdr *sechdr, unsigned int section)
{
	long ret;

	*size += arch_mod_section_prepend(mod, section);
	ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
	*size = ret + sechdr->sh_size;
	return ret;
}
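/*
 * Worked example (illustrative): if *size is currently 0x1234 and the next
 * section has sh_addralign == 0x10 and sh_size == 0x40, get_offset() returns
 * ALIGN(0x1234, 0x10) == 0x1240 as the section offset and advances *size to
 * 0x1280.
 */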
/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
   might -- code, read-only data, read-write data, small data.  Tally
   sizes, and place the offsets into sh_entsize fields: high bit means it
   belongs in init. */
static void layout_sections(struct module *mod, struct load_info *info)
{
	static unsigned long const masks[][2] = {
		/* NOTE: all executable code must be the first section
		 * in this array; otherwise modify the text_size
		 * finder in the two loops below */
		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
	};
	unsigned int m, i;

	for (i = 0; i < info->hdr->e_shnum; i++)
		info->sechdrs[i].sh_entsize = ~0UL;

	pr_debug("Core section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || strstarts(sname, ".init"))
				continue;
			s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
			pr_debug("\t%s\n", sname);
		}
		switch (m) {
		case 0: /* executable */
			mod->core_size = debug_align(mod->core_size);
			mod->core_text_size = mod->core_size;
			break;
		case 1: /* RO: text and ro-data */
			mod->core_size = debug_align(mod->core_size);
			mod->core_ro_size = mod->core_size;
			break;
		case 3: /* whole core */
			mod->core_size = debug_align(mod->core_size);
			break;
		}
	}

	pr_debug("Init section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || !strstarts(sname, ".init"))
				continue;
			s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
					 | INIT_OFFSET_MASK);
			pr_debug("\t%s\n", sname);
		}
		switch (m) {
		case 0: /* executable */
			mod->init_size = debug_align(mod->init_size);
			mod->init_text_size = mod->init_size;
			break;
		case 1: /* RO: text and ro-data */
			mod->init_size = debug_align(mod->init_size);
			mod->init_ro_size = mod->init_size;
			break;
		case 3: /* whole init */
			mod->init_size = debug_align(mod->init_size);
			break;
		}
	}
}
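/*
 * Layout sketch (illustrative): after layout_sections() the core region is
 * ordered executable text, then read-only data, then read-write data, then
 * small/arch-specific data, with core_text_size <= core_ro_size <= core_size
 * marking the boundaries that set_section_ro_nx() later uses; the init region
 * gets the same treatment, with INIT_OFFSET_MASK set on its section offsets.
 */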
static void set_license(struct module *mod, const char *license)
{
	if (!license)
		license = "unspecified";

	if (!license_is_gpl_compatible(license)) {
		if (!test_taint(TAINT_PROPRIETARY_MODULE))
			pr_warn("%s: module license '%s' taints kernel.\n",
				mod->name, license);
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);
	}
}

/* Parse tag=value strings from .modinfo section */
static char *next_string(char *string, unsigned long *secsize)
{
	/* Skip non-zero chars */
	while (string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}

	/* Skip any zero padding. */
	while (!string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}
	return string;
}
static char *get_modinfo(struct load_info *info, const char *tag)
{
	char *p;
	unsigned int taglen = strlen(tag);
	Elf_Shdr *infosec = &info->sechdrs[info->index.info];
	unsigned long size = infosec->sh_size;

	for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
			return p + taglen + 1;
	}
	return NULL;
}
static void setup_modinfo(struct module *mod, struct load_info *info)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {
		if (attr->setup)
			attr->setup(mod, get_modinfo(info, attr->attr.name));
	}
}

static void free_modinfo(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {
		if (attr->free)
			attr->free(mod);
	}
}
#ifdef CONFIG_KALLSYMS

/* lookup symbol in given range of kernel_symbols */
static const struct kernel_symbol *lookup_symbol(const char *name,
	const struct kernel_symbol *start,
	const struct kernel_symbol *stop)
{
	return bsearch(name, start, stop - start,
			sizeof(struct kernel_symbol), cmp_name);
}
static int is_exported(const char *name, unsigned long value,
		       const struct module *mod)
{
	const struct kernel_symbol *ks;

	if (!mod)
		ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
	else
		ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
	return ks != NULL && ks->value == value;
}
/* As per nm */
static char elf_type(const Elf_Sym *sym, const struct load_info *info)
{
	const Elf_Shdr *sechdrs = info->sechdrs;

	if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
		if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
			return 'v';
		else
			return 'w';
	}
	if (sym->st_shndx == SHN_UNDEF)
		return 'U';
	if (sym->st_shndx == SHN_ABS)
		return 'a';
	if (sym->st_shndx >= SHN_LORESERVE)
		return '?';
	if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
		return 't';
	if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
	    && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
		if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
			return 'r';
		else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
			return 'g';
		else
			return 'd';
	}
	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
		if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
			return 's';
		else
			return 'b';
	}
	if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
		      ".debug"))
		return 'n';
	return '?';
}
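/*
 * The single-character codes above follow nm(1) conventions ('t' for text,
 * 'd' for data, 'b' for bss and so on). add_kallsyms() stores the result in
 * st_info, and module_get_kallsym() below hands it back as the symbol type
 * that kallsyms reports.
 */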
static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
			   unsigned int shnum)
{
	const Elf_Shdr *sec;

	if (src->st_shndx == SHN_UNDEF
	    || src->st_shndx >= shnum
	    || !src->st_name)
		return false;

	sec = sechdrs + src->st_shndx;
	if (!(sec->sh_flags & SHF_ALLOC)
#ifndef CONFIG_KALLSYMS_ALL
	    || !(sec->sh_flags & SHF_EXECINSTR)
#endif
	    || (sec->sh_entsize & INIT_OFFSET_MASK))
		return false;

	return true;
}
/*
 * We only allocate and copy the strings needed by the parts of symtab
 * we keep.  This is simple, but has the effect of making multiple
 * copies of duplicates.  We could be more sophisticated, see
 * linux-kernel thread starting with
 * <73defb5e4bca04a6431392cc341112b1@localhost>.
 */
static void layout_symtab(struct module *mod, struct load_info *info)
{
	Elf_Shdr *symsect = info->sechdrs + info->index.sym;
	Elf_Shdr *strsect = info->sechdrs + info->index.str;
	const Elf_Sym *src;
	unsigned int i, nsrc, ndst, strtab_size = 0;

	/* Put symbol section at end of init part of module. */
	symsect->sh_flags |= SHF_ALLOC;
	symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
					 info->index.sym) | INIT_OFFSET_MASK;
	pr_debug("\t%s\n", info->secstrings + symsect->sh_name);

	src = (void *)info->hdr + symsect->sh_offset;
	nsrc = symsect->sh_size / sizeof(*src);

	/* Compute total space required for the core symbols' strtab. */
	for (ndst = i = 0; i < nsrc; i++) {
		if (i == 0 ||
		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
			strtab_size += strlen(&info->strtab[src[i].st_name])+1;
			ndst++;
		}
	}

	/* Append room for core symbols at end of core part. */
	info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
	info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
	mod->core_size += strtab_size;
	mod->core_size = debug_align(mod->core_size);

	/* Put string table section at end of init part of module. */
	strsect->sh_flags |= SHF_ALLOC;
	strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
					 info->index.str) | INIT_OFFSET_MASK;
	mod->init_size = debug_align(mod->init_size);
	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
}
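/*
 * info->symoffs and info->stroffs recorded above are offsets into the core
 * allocation; add_kallsyms() resolves them against mod->module_core once
 * move_module() has actually allocated and populated that region.
 */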
static void add_kallsyms(struct module *mod, const struct load_info *info)
{
	unsigned int i, ndst;
	const Elf_Sym *src;
	Elf_Sym *dst;
	char *s;
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];

	mod->symtab = (void *)symsec->sh_addr;
	mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
	/* Make sure we get permanent strtab: don't use info->strtab. */
	mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;

	/* Set types up while we still have access to sections. */
	for (i = 0; i < mod->num_symtab; i++)
		mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);

	mod->core_symtab = dst = mod->module_core + info->symoffs;
	mod->core_strtab = s = mod->module_core + info->stroffs;
	src = mod->symtab;
	for (ndst = i = 0; i < mod->num_symtab; i++) {
		if (i == 0 ||
		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
			dst[ndst] = src[i];
			dst[ndst++].st_name = s - mod->core_strtab;
			s += strlcpy(s, &mod->strtab[src[i].st_name],
				     KSYM_NAME_LEN) + 1;
		}
	}
	mod->core_num_syms = ndst;
}
#else /* !CONFIG_KALLSYMS */
static inline void layout_symtab(struct module *mod, struct load_info *info)
{
}

static void add_kallsyms(struct module *mod, const struct load_info *info)
{
}
#endif /* CONFIG_KALLSYMS */
static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
{
	if (!debug)
		return;
#ifdef CONFIG_DYNAMIC_DEBUG
	if (ddebug_add_module(debug, num, debug->modname))
		pr_err("dynamic debug error adding module: %s\n",
		       debug->modname);
#endif
}

static void dynamic_debug_remove(struct _ddebug *debug)
{
	if (debug)
		ddebug_remove_module(debug->modname);
}
void * __weak module_alloc(unsigned long size)
{
	return vmalloc_exec(size);
}
#ifdef CONFIG_DEBUG_KMEMLEAK
static void kmemleak_load_module(const struct module *mod,
				 const struct load_info *info)
{
	unsigned int i;

	/* only scan the sections containing data */
	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);

	for (i = 1; i < info->hdr->e_shnum; i++) {
		/* Scan all writable sections that are not executable */
		if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
		    !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
		    (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
			continue;

		kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
				   info->sechdrs[i].sh_size, GFP_KERNEL);
	}
}
#else
static inline void kmemleak_load_module(const struct module *mod,
					const struct load_info *info)
{
}
#endif
#ifdef CONFIG_MODULE_SIG
static int module_sig_check(struct load_info *info)
{
	int err = -ENOKEY;
	const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
	const void *mod = info->hdr;

	if (info->len > markerlen &&
	    memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
		/* We truncate the module to discard the signature */
		info->len -= markerlen;
		err = mod_verify_sig(mod, &info->len);
	}

	if (!err) {
		info->sig_ok = true;
		return 0;
	}

	/* Not having a signature is only an error if we're strict. */
	if (err == -ENOKEY && !sig_enforce)
		err = 0;

	return err;
}
#else /* !CONFIG_MODULE_SIG */
static int module_sig_check(struct load_info *info)
{
	return 0;
}
#endif /* !CONFIG_MODULE_SIG */
/* Sanity checks against invalid binaries, wrong arch, weird elf version. */
static int elf_header_check(struct load_info *info)
{
	if (info->len < sizeof(*(info->hdr)))
		return -ENOEXEC;

	if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
	    || info->hdr->e_type != ET_REL
	    || !elf_check_arch(info->hdr)
	    || info->hdr->e_shentsize != sizeof(Elf_Shdr))
		return -ENOEXEC;

	if (info->hdr->e_shoff >= info->len
	    || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
		info->len - info->hdr->e_shoff))
		return -ENOEXEC;

	return 0;
}
#define COPY_CHUNK_SIZE (16*PAGE_SIZE)

static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
{
	do {
		unsigned long n = min(len, COPY_CHUNK_SIZE);

		if (copy_from_user(dst, usrc, n) != 0)
			return -EFAULT;
		cond_resched();
		dst += n;
		usrc += n;
		len -= n;
	} while (len);
	return 0;
}
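/*
 * Copying in COPY_CHUNK_SIZE pieces with a cond_resched() in between keeps
 * the loader preemptible even for large modules, instead of doing one long
 * uninterruptible copy_from_user() of the whole image.
 */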
/* Sets info->hdr and info->len. */
static int copy_module_from_user(const void __user *umod, unsigned long len,
				 struct load_info *info)
{
	int err;

	info->len = len;
	if (info->len < sizeof(*(info->hdr)))
		return -ENOEXEC;

	err = security_kernel_module_from_file(NULL);
	if (err)
		return err;

	/* Suck in entire file: we'll want most of it. */
	info->hdr = __vmalloc(info->len,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, PAGE_KERNEL);
	if (!info->hdr)
		return -ENOMEM;

	if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
		vfree(info->hdr);
		return -EFAULT;
	}

	return 0;
}
/* Sets info->hdr and info->len. */
static int copy_module_from_fd(int fd, struct load_info *info)
{
	struct fd f = fdget(fd);
	int err;
	struct kstat stat;
	loff_t pos;
	ssize_t bytes = 0;

	if (!f.file)
		return -ENOEXEC;

	err = security_kernel_module_from_file(f.file);
	if (err)
		goto out;

	err = vfs_getattr(&f.file->f_path, &stat);
	if (err)
		goto out;

	if (stat.size > INT_MAX) {
		err = -EFBIG;
		goto out;
	}

	/* Don't hand 0 to vmalloc, it whines. */
	if (stat.size == 0) {
		err = -EINVAL;
		goto out;
	}

	info->hdr = vmalloc(stat.size);
	if (!info->hdr) {
		err = -ENOMEM;
		goto out;
	}

	pos = 0;
	while (pos < stat.size) {
		bytes = kernel_read(f.file, pos, (char *)(info->hdr) + pos,
				    stat.size - pos);
		if (bytes < 0) {
			vfree(info->hdr);
			err = bytes;
			goto out;
		}
		if (bytes == 0)
			break;
		pos += bytes;
	}
	info->len = pos;

out:
	fdput(f);
	return err;
}
static void free_copy(struct load_info *info)
{
	vfree(info->hdr);
}
static int rewrite_section_headers(struct load_info *info, int flags)
{
	unsigned int i;

	/* This should always be true, but let's be sure. */
	info->sechdrs[0].sh_addr = 0;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		if (shdr->sh_type != SHT_NOBITS
		    && info->len < shdr->sh_offset + shdr->sh_size) {
			pr_err("Module len %lu truncated\n", info->len);
			return -ENOEXEC;
		}

		/* Mark all sections sh_addr with their address in the
		   temporary image. */
		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;

#ifndef CONFIG_MODULE_UNLOAD
		/* Don't load .exit sections */
		if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
			shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
#endif
	}

	/* Track but don't keep modinfo and version sections. */
	if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
		info->index.vers = 0; /* Pretend no __versions section! */
	else
		info->index.vers = find_sec(info, "__versions");
	info->index.info = find_sec(info, ".modinfo");
	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
	return 0;
}
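/*
 * At this point every sh_addr points into the temporary vmalloc'd copy of
 * the ELF image; move_module() rewrites sh_addr again once each SHF_ALLOC
 * section has been copied to its final core or init location.
 */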
/*
 * Set up our basic convenience variables (pointers to section headers,
 * search for module section index etc), and do some basic section
 * verification.
 *
 * Return the temporary module pointer (we'll replace it with the final
 * one when we move the module sections around).
 */
static struct module *setup_load_info(struct load_info *info, int flags)
{
	unsigned int i;
	int err;
	struct module *mod;

	/* Set up the convenience variables */
	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
	info->secstrings = (void *)info->hdr
		+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;

	err = rewrite_section_headers(info, flags);
	if (err)
		return ERR_PTR(err);

	/* Find internal symbols and strings. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
			info->index.sym = i;
			info->index.str = info->sechdrs[i].sh_link;
			info->strtab = (char *)info->hdr
				+ info->sechdrs[info->index.str].sh_offset;
			break;
		}
	}

	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
	if (!info->index.mod) {
		pr_warn("No module found in object\n");
		return ERR_PTR(-ENOEXEC);
	}
	/* This is temporary: point mod into copy of data. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;

	if (info->index.sym == 0) {
		pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
		return ERR_PTR(-ENOEXEC);
	}

	info->index.pcpu = find_pcpusec(info);

	/* Check module struct version now, before we try to use module. */
	if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
		return ERR_PTR(-ENOEXEC);

	return mod;
}
static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{
	const char *modmagic = get_modinfo(info, "vermagic");
	int err;

	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
		modmagic = NULL;

	/* This is allowed: modprobe --force will invalidate it. */
	if (!modmagic) {
		err = try_to_force_load(mod, "bad vermagic");
		if (err)
			return err;
	} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
		pr_err("%s: version magic '%s' should be '%s'\n",
		       mod->name, modmagic, vermagic);
		return -ENOEXEC;
	}

	if (!get_modinfo(info, "intree"))
		add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);

	if (get_modinfo(info, "staging")) {
		add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
		pr_warn("%s: module is from the staging directory, the quality "
			"is unknown, you have been warned.\n", mod->name);
	}

	/* Set up license info based on the info section */
	set_license(mod, get_modinfo(info, "license"));

	return 0;
}
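/*
 * An illustrative vermagic string (an assumed example, not from any real
 * build) is "4.1.0 SMP mod_unload modversions ". same_magic() compares it
 * with the kernel's own vermagic; when the module carries CRCs in
 * __versions, the release prefix is skipped and only the feature flags
 * need to match.
 */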
static int find_module_sections(struct module *mod, struct load_info *info)
{
	mod->kp = section_objs(info, "__param",
			       sizeof(*mod->kp), &mod->num_kp);
	mod->syms = section_objs(info, "__ksymtab",
				 sizeof(*mod->syms), &mod->num_syms);
	mod->crcs = section_addr(info, "__kcrctab");
	mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
				     sizeof(*mod->gpl_syms),
				     &mod->num_gpl_syms);
	mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
	mod->gpl_future_syms = section_objs(info,
					    "__ksymtab_gpl_future",
					    sizeof(*mod->gpl_future_syms),
					    &mod->num_gpl_future_syms);
	mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");

#ifdef CONFIG_UNUSED_SYMBOLS
	mod->unused_syms = section_objs(info, "__ksymtab_unused",
					sizeof(*mod->unused_syms),
					&mod->num_unused_syms);
	mod->unused_crcs = section_addr(info, "__kcrctab_unused");
	mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
					    sizeof(*mod->unused_gpl_syms),
					    &mod->num_unused_gpl_syms);
	mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
#endif
#ifdef CONFIG_CONSTRUCTORS
	mod->ctors = section_objs(info, ".ctors",
				  sizeof(*mod->ctors), &mod->num_ctors);
	if (!mod->ctors)
		mod->ctors = section_objs(info, ".init_array",
					  sizeof(*mod->ctors), &mod->num_ctors);
	else if (find_sec(info, ".init_array")) {
		/*
		 * This shouldn't happen with same compiler and binutils
		 * building all parts of the module.
		 */
		pr_warn("%s: has both .ctors and .init_array.\n",
			mod->name);
		return -EINVAL;
	}
#endif

#ifdef CONFIG_TRACEPOINTS
	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
					     sizeof(*mod->tracepoints_ptrs),
					     &mod->num_tracepoints);
#endif
#ifdef HAVE_JUMP_LABEL
	mod->jump_entries = section_objs(info, "__jump_table",
					 sizeof(*mod->jump_entries),
					 &mod->num_jump_entries);
#endif
#ifdef CONFIG_EVENT_TRACING
	mod->trace_events = section_objs(info, "_ftrace_events",
					 sizeof(*mod->trace_events),
					 &mod->num_trace_events);
	mod->trace_enums = section_objs(info, "_ftrace_enum_map",
					sizeof(*mod->trace_enums),
					&mod->num_trace_enums);
#endif
#ifdef CONFIG_TRACING
	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
					 sizeof(*mod->trace_bprintk_fmt_start),
					 &mod->num_trace_bprintk_fmt);
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
	/* sechdrs[0].sh_size is always zero */
	mod->ftrace_callsites = section_objs(info, "__mcount_loc",
					     sizeof(*mod->ftrace_callsites),
					     &mod->num_ftrace_callsites);
#endif

	mod->extable = section_objs(info, "__ex_table",
				    sizeof(*mod->extable), &mod->num_exentries);

	if (section_addr(info, "__obsparm"))
		pr_warn("%s: Ignoring obsolete parameters\n", mod->name);

	info->debug = section_objs(info, "__verbose",
				   sizeof(*info->debug), &info->num_debug);

	return 0;
}
static int move_module(struct module *mod, struct load_info *info)
{
	int i;
	void *ptr;

	/* Do the allocs. */
	ptr = module_alloc(mod->core_size);
	/*
	 * The pointer to this block is stored in the module structure
	 * which is inside the block. Just mark it as not being a
	 * leak.
	 */
	kmemleak_not_leak(ptr);
	if (!ptr)
		return -ENOMEM;

	memset(ptr, 0, mod->core_size);
	mod->module_core = ptr;

	if (mod->init_size) {
		ptr = module_alloc(mod->init_size);
		/*
		 * The pointer to this block is stored in the module structure
		 * which is inside the block. This block doesn't need to be
		 * scanned as it contains data and code that will be freed
		 * after the module is initialized.
		 */
		kmemleak_ignore(ptr);
		if (!ptr) {
			module_memfree(mod->module_core);
			return -ENOMEM;
		}
		memset(ptr, 0, mod->init_size);
		mod->module_init = ptr;
	} else
		mod->module_init = NULL;

	/* Transfer each section which specifies SHF_ALLOC */
	pr_debug("final section addresses:\n");
	for (i = 0; i < info->hdr->e_shnum; i++) {
		void *dest;
		Elf_Shdr *shdr = &info->sechdrs[i];

		if (!(shdr->sh_flags & SHF_ALLOC))
			continue;

		if (shdr->sh_entsize & INIT_OFFSET_MASK)
			dest = mod->module_init
				+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
		else
			dest = mod->module_core + shdr->sh_entsize;

		if (shdr->sh_type != SHT_NOBITS)
			memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
		/* Update sh_addr to point to copy in image. */
		shdr->sh_addr = (unsigned long)dest;
		pr_debug("\t0x%lx %s\n",
			 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
	}

	return 0;
}
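/*
 * From here on the section headers describe the final image: relocation,
 * kallsyms setup and the pr_debug() trace above all operate on addresses
 * inside mod->module_core and mod->module_init.
 */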
static int check_module_license_and_versions(struct module *mod)
{
	/*
	 * ndiswrapper is under GPL by itself, but loads proprietary modules.
	 * Don't use add_taint_module(), as it would prevent ndiswrapper from
	 * using GPL-only symbols it needs.
	 */
	if (strcmp(mod->name, "ndiswrapper") == 0)
		add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);

	/* driverloader was caught wrongly pretending to be under GPL */
	if (strcmp(mod->name, "driverloader") == 0)
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);

	/* lve claims to be GPL but upstream won't provide source */
	if (strcmp(mod->name, "lve") == 0)
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);

#ifdef CONFIG_MODVERSIONS
	if ((mod->num_syms && !mod->crcs)
	    || (mod->num_gpl_syms && !mod->gpl_crcs)
	    || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
#ifdef CONFIG_UNUSED_SYMBOLS
	    || (mod->num_unused_syms && !mod->unused_crcs)
	    || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
#endif
		) {
		return try_to_force_load(mod,
					 "no versions for exported symbols");
	}
#endif
	return 0;
}
static void flush_module_icache(const struct module *mod)
{
	mm_segment_t old_fs;

	/* flush the icache in correct context */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/*
	 * Flush the instruction cache, since we've played with text.
	 * Do it before processing of module parameters, so the module
	 * can provide parameter accessor functions of its own.
	 */
	if (mod->module_init)
		flush_icache_range((unsigned long)mod->module_init,
				   (unsigned long)mod->module_init
				   + mod->init_size);
	flush_icache_range((unsigned long)mod->module_core,
			   (unsigned long)mod->module_core + mod->core_size);

	set_fs(old_fs);
}
int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
				     Elf_Shdr *sechdrs,
				     char *secstrings,
				     struct module *mod)
{
	return 0;
}
static struct module *layout_and_allocate(struct load_info *info, int flags)
{
	/* Module within temporary copy. */
	struct module *mod;
	int err;

	mod = setup_load_info(info, flags);
	if (IS_ERR(mod))
		return mod;

	err = check_modinfo(mod, info, flags);
	if (err)
		return ERR_PTR(err);

	/* Allow arches to frob section contents and sizes. */
	err = module_frob_arch_sections(info->hdr, info->sechdrs,
					info->secstrings, mod);
	if (err < 0)
		return ERR_PTR(err);

	/* We will do a special allocation for per-cpu sections later. */
	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;

	/*
	 * Determine total sizes, and put offsets in sh_entsize.  For now
	 * this is done generically; there doesn't appear to be any
	 * special cases for the architectures.
	 */
	layout_sections(mod, info);
	layout_symtab(mod, info);

	/* Allocate and move to the final place */
	err = move_module(mod, info);
	if (err)
		return ERR_PTR(err);

	/* Module has been copied to its final place now: return it. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
	kmemleak_load_module(mod, info);
	return mod;
}
/* mod is no longer valid after this! */
static void module_deallocate(struct module *mod, struct load_info *info)
{
	percpu_modfree(mod);
	module_arch_freeing_init(mod);
	module_memfree(mod->module_init);
	module_memfree(mod->module_core);
}
int __weak module_finalize(const Elf_Ehdr *hdr,
			   const Elf_Shdr *sechdrs,
			   struct module *me)
{
	return 0;
}
static int post_relocation(struct module *mod, const struct load_info *info)
{
	/* Sort exception table now relocations are done. */
	sort_extable(mod->extable, mod->extable + mod->num_exentries);

	/* Copy relocated percpu area over. */
	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
		       info->sechdrs[info->index.pcpu].sh_size);

	/* Setup kallsyms-specific fields. */
	add_kallsyms(mod, info);

	/* Arch-specific module finalizing. */
	return module_finalize(info->hdr, info->sechdrs, mod);
}
/* Is this module of this name done loading?  No locks held. */
static bool finished_loading(const char *name)
{
	struct module *mod;
	bool ret;

	/*
	 * The module_mutex should not be a heavily contended lock;
	 * if we get the occasional sleep here, we'll go an extra iteration
	 * in the wait_event_interruptible(), which is harmless.
	 */
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	mod = find_module_all(name, strlen(name), true);
	ret = !mod || mod->state == MODULE_STATE_LIVE
		|| mod->state == MODULE_STATE_GOING;
	mutex_unlock(&module_mutex);

	return ret;
}
/* Call module constructors. */
static void do_mod_ctors(struct module *mod)
{
#ifdef CONFIG_CONSTRUCTORS
	unsigned long i;

	for (i = 0; i < mod->num_ctors; i++)
		mod->ctors[i]();
#endif
}
/* For freeing module_init on success, in case kallsyms traversing */
struct mod_initfree {
	struct rcu_head rcu;
	void *module_init;
};

static void do_free_init(struct rcu_head *head)
{
	struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
	module_memfree(m->module_init);
	kfree(m);
}
/*
 * This is where the real work happens.
 *
 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
 * helper command 'lx-symbols'.
 */
static noinline int do_init_module(struct module *mod)
{
	int ret = 0;
	struct mod_initfree *freeinit;

	freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
	if (!freeinit) {
		ret = -ENOMEM;
		goto fail;
	}
	freeinit->module_init = mod->module_init;

	/*
	 * We want to find out whether @mod uses async during init.  Clear
	 * PF_USED_ASYNC.  async_schedule*() will set it.
	 */
	current->flags &= ~PF_USED_ASYNC;

	do_mod_ctors(mod);
	/* Start the module */
	if (mod->init != NULL)
		ret = do_one_initcall(mod->init);
	if (ret < 0)
		goto fail_free_freeinit;
	if (ret > 0) {
		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
			"follow 0/-E convention\n"
			"%s: loading module anyway...\n",
			__func__, mod->name, ret, __func__);
		dump_stack();
	}

	/* Now it's a first class citizen! */
	mod->state = MODULE_STATE_LIVE;
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_LIVE, mod);

	/*
	 * We need to finish all async code before the module init sequence
	 * is done.  This has potential to deadlock.  For example, a newly
	 * detected block device can trigger request_module() of the
	 * default iosched from async probing task.  Once userland helper
	 * reaches here, async_synchronize_full() will wait on the async
	 * task waiting on request_module() and deadlock.
	 *
	 * This deadlock is avoided by performing async_synchronize_full()
	 * iff module init queued any async jobs.  This isn't a full
	 * solution as it will deadlock the same if module loading from
	 * async jobs nests more than once; however, due to the various
	 * constraints, this hack seems to be the best option for now.
	 * Please refer to the following thread for details.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1420814
	 */
	if (current->flags & PF_USED_ASYNC)
		async_synchronize_full();

	mutex_lock(&module_mutex);
	/* Drop initial reference. */
	module_put(mod);
	trim_init_extable(mod);
#ifdef CONFIG_KALLSYMS
	mod->num_symtab = mod->core_num_syms;
	mod->symtab = mod->core_symtab;
	mod->strtab = mod->core_strtab;
#endif
	mod_tree_remove_init(mod);
	unset_module_init_ro_nx(mod);
	module_arch_freeing_init(mod);
	mod->module_init = NULL;
	mod->init_size = 0;
	mod->init_ro_size = 0;
	mod->init_text_size = 0;
	/*
	 * We want to free module_init, but be aware that kallsyms may be
	 * walking this with preempt disabled.  In all the failure paths, we
	 * call synchronize_sched(), but we don't want to slow down the success
	 * path, so use actual RCU here.
	 */
	call_rcu_sched(&freeinit->rcu, do_free_init);
	mutex_unlock(&module_mutex);
	wake_up_all(&module_wq);

	return 0;

fail_free_freeinit:
	kfree(freeinit);
fail:
	/* Try to protect us from buggy refcounters. */
	mod->state = MODULE_STATE_GOING;
	synchronize_sched();
	module_put(mod);
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	free_module(mod);
	wake_up_all(&module_wq);
	return ret;
}
static int may_init_module(void)
{
	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	return 0;
}
/*
 * We try to place it in the list now to make sure it's unique before
 * we dedicate too many resources.  In particular, temporary percpu
 * memory exhaustion.
 */
static int add_unformed_module(struct module *mod)
{
	int err;
	struct module *old;

	mod->state = MODULE_STATE_UNFORMED;

again:
	mutex_lock(&module_mutex);
	old = find_module_all(mod->name, strlen(mod->name), true);
	if (old != NULL) {
		if (old->state == MODULE_STATE_COMING
		    || old->state == MODULE_STATE_UNFORMED) {
			/* Wait in case it fails to load. */
			mutex_unlock(&module_mutex);
			err = wait_event_interruptible(module_wq,
					       finished_loading(mod->name));
			if (err)
				goto out_unlocked;
			goto again;
		}
		err = -EEXIST;
		goto out;
	}
	mod_update_bounds(mod);
	list_add_rcu(&mod->list, &modules);
	mod_tree_insert(mod);
	err = 0;

out:
	mutex_unlock(&module_mutex);
out_unlocked:
	return err;
}
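/*
 * Publishing the module in MODULE_STATE_UNFORMED reserves the name early;
 * every reader that walks the module list or address trees explicitly skips
 * UNFORMED entries, so the half-built module stays invisible until
 * complete_formation() flips it to MODULE_STATE_COMING.
 */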
static int complete_formation(struct module *mod, struct load_info *info)
{
	int err;

	mutex_lock(&module_mutex);

	/* Find duplicate symbols (must be called under lock). */
	err = verify_export_symbols(mod);
	if (err < 0)
		goto out;

	/* This relies on module_mutex for list integrity. */
	module_bug_finalize(info->hdr, info->sechdrs, mod);

	/* Set RO and NX regions for core */
	set_section_ro_nx(mod->module_core,
			  mod->core_text_size,
			  mod->core_ro_size,
			  mod->core_size);

	/* Set RO and NX regions for init */
	set_section_ro_nx(mod->module_init,
			  mod->init_text_size,
			  mod->init_ro_size,
			  mod->init_size);

	/* Mark state as coming so strong_try_module_get() ignores us,
	 * but kallsyms etc. can see us. */
	mod->state = MODULE_STATE_COMING;
	mutex_unlock(&module_mutex);

	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_COMING, mod);
	return 0;

out:
	mutex_unlock(&module_mutex);
	return err;
}
static int unknown_module_param_cb(char *param, char *val, const char *modname)
{
	/* Check for magic 'dyndbg' arg */
	int ret = ddebug_dyndbg_module_param_cb(param, val, modname);

	if (ret != 0)
		pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
	return 0;
}
/*
 * Allocate and load the module: note that size of section 0 is always
 * zero, and we rely on this for optional sections.
 */
static int load_module(struct load_info *info, const char __user *uargs,
		       int flags)
{
	struct module *mod;
	long err;
	char *after_dashes;

	err = module_sig_check(info);
	if (err)
		goto free_copy;

	err = elf_header_check(info);
	if (err)
		goto free_copy;

	/* Figure out module layout, and allocate all the memory. */
	mod = layout_and_allocate(info, flags);
	if (IS_ERR(mod)) {
		err = PTR_ERR(mod);
		goto free_copy;
	}

	/* Reserve our place in the list. */
	err = add_unformed_module(mod);
	if (err)
		goto free_module;

#ifdef CONFIG_MODULE_SIG
	mod->sig_ok = info->sig_ok;
	if (!mod->sig_ok) {
		pr_notice_once("%s: module verification failed: signature "
			       "and/or required key missing - tainting "
			       "kernel\n", mod->name);
		add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
	}
#endif

	/* To avoid stressing percpu allocator, do this once we're unique. */
	err = percpu_modalloc(mod, info);
	if (err)
		goto unlink_mod;

	/* Now module is in final location, initialize linked lists, etc. */
	err = module_unload_init(mod);
	if (err)
		goto unlink_mod;

	/* Now we've got everything in the final locations, we can
	 * find optional sections. */
	err = find_module_sections(mod, info);
	if (err)
		goto free_unload;

	err = check_module_license_and_versions(mod);
	if (err)
		goto free_unload;

	/* Set up MODINFO_ATTR fields */
	setup_modinfo(mod, info);

	/* Fix up syms, so that st_value is a pointer to location. */
	err = simplify_symbols(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = apply_relocations(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = post_relocation(mod, info);
	if (err < 0)
		goto free_modinfo;

	flush_module_icache(mod);

	/* Now copy in args */
	mod->args = strndup_user(uargs, ~0UL >> 1);
	if (IS_ERR(mod->args)) {
		err = PTR_ERR(mod->args);
		goto free_arch_cleanup;
	}

	dynamic_debug_setup(info->debug, info->num_debug);

	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
	ftrace_module_init(mod);

	/* Finally it's fully formed, ready to start executing. */
	err = complete_formation(mod, info);
	if (err)
		goto ddebug_cleanup;

	/* Module is ready to execute: parsing args may do that. */
	after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
				  -32768, 32767, unknown_module_param_cb);
	if (IS_ERR(after_dashes)) {
		err = PTR_ERR(after_dashes);
		goto bug_cleanup;
	} else if (after_dashes) {
		pr_warn("%s: parameters '%s' after `--' ignored\n",
			mod->name, after_dashes);
	}

	/* Link in to sysfs. */
	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
	if (err < 0)
		goto bug_cleanup;

	/* Get rid of temporary copy. */
	free_copy(info);

	/* Done! */
	trace_module_load(mod);

	return do_init_module(mod);

 bug_cleanup:
	/* module_bug_cleanup needs module_mutex protection */
	mutex_lock(&module_mutex);
	module_bug_cleanup(mod);
	mutex_unlock(&module_mutex);

	/* we can't deallocate the module until we clear memory protection */
	unset_module_init_ro_nx(mod);
	unset_module_core_ro_nx(mod);

 ddebug_cleanup:
	dynamic_debug_remove(info->debug);
	synchronize_sched();
	kfree(mod->args);
 free_arch_cleanup:
	module_arch_cleanup(mod);
 free_modinfo:
	free_modinfo(mod);
 free_unload:
	module_unload_free(mod);
 unlink_mod:
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	wake_up_all(&module_wq);
	/* Wait for RCU-sched synchronizing before releasing mod->list. */
	synchronize_sched();
	mutex_unlock(&module_mutex);
 free_module:
	/* Free lock-classes; relies on the preceding sync_rcu() */
	lockdep_free_key_range(mod->module_core, mod->core_size);

	module_deallocate(mod, info);
 free_copy:
	free_copy(info);
	return err;
}
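/*
 * The error labels above unwind in the reverse order of the setup steps, so
 * a failure at any point only undoes what has already been done; every
 * failure path eventually reaches free_copy(), mirroring the free_copy()
 * call on the success path.
 */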
SYSCALL_DEFINE3(init_module, void __user *, umod,
		unsigned long, len, const char __user *, uargs)
{
	int err;
	struct load_info info = { };

	err = may_init_module();
	if (err)
		return err;

	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
		 umod, len, uargs);

	err = copy_module_from_user(umod, len, &info);
	if (err)
		return err;

	return load_module(&info, uargs, 0);
}
SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{
	int err;
	struct load_info info = { };

	err = may_init_module();
	if (err)
		return err;

	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);

	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
		      |MODULE_INIT_IGNORE_VERMAGIC))
		return -EINVAL;

	err = copy_module_from_fd(fd, &info);
	if (err)
		return err;

	return load_module(&info, uargs, flags);
}
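/*
 * Userspace view (an illustrative sketch, not part of this file): a loader
 * typically opens the .ko and hands the kernel the file descriptor, e.g.
 *
 *   int fd = open("example.ko", O_RDONLY);          (hypothetical path)
 *   syscall(__NR_finit_module, fd, "param=1", 0);
 *
 * with flags drawn from MODULE_INIT_IGNORE_MODVERSIONS and
 * MODULE_INIT_IGNORE_VERMAGIC as declared in <uapi/linux/module.h>.
 */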
static inline int within(unsigned long addr, void *start, unsigned long size)
{
	return ((void *)addr >= start && (void *)addr < start + size);
}
#ifdef CONFIG_KALLSYMS
/*
 * This ignores the intensely annoying "mapping symbols" found
 * in ARM ELF files: $a, $t and $d.
 */
static inline int is_arm_mapping_symbol(const char *str)
{
	if (str[0] == '.' && str[1] == 'L')
		return true;
	return str[0] == '$' && strchr("axtd", str[1])
	       && (str[2] == '\0' || str[2] == '.');
}
static const char *get_ksymbol(struct module *mod,
			       unsigned long addr,
			       unsigned long *size,
			       unsigned long *offset)
{
	unsigned int i, best = 0;
	unsigned long nextval;

	/* At worst, next value is at end of module */
	if (within_module_init(addr, mod))
		nextval = (unsigned long)mod->module_init+mod->init_text_size;
	else
		nextval = (unsigned long)mod->module_core+mod->core_text_size;

	/* Scan for closest preceding symbol, and next symbol. (ELF
	   starts real symbols at 1). */
	for (i = 1; i < mod->num_symtab; i++) {
		if (mod->symtab[i].st_shndx == SHN_UNDEF)
			continue;

		/* We ignore unnamed symbols: they're uninformative
		 * and inserted at a whim. */
		if (mod->symtab[i].st_value <= addr
		    && mod->symtab[i].st_value > mod->symtab[best].st_value
		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
			best = i;
		if (mod->symtab[i].st_value > addr
		    && mod->symtab[i].st_value < nextval
		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
			nextval = mod->symtab[i].st_value;
	}

	if (!best)
		return NULL;

	if (size)
		*size = nextval - mod->symtab[best].st_value;
	if (offset)
		*offset = addr - mod->symtab[best].st_value;
	return mod->strtab + mod->symtab[best].st_name;
}
/* For kallsyms to ask for address resolution.  NULL means not found.  Careful
 * not to lock to avoid deadlock on oopses, simply disable preemption. */
const char *module_address_lookup(unsigned long addr,
				  unsigned long *size,
				  unsigned long *offset,
				  char **modname,
				  char *namebuf)
{
	struct module *mod;
	const char *ret = NULL;

	preempt_disable();
	mod = __module_address(addr);
	if (mod) {
		if (modname)
			*modname = mod->name;
		ret = get_ksymbol(mod, addr, size, offset);
	}
	/* Make a copy in here where it's safe */
	if (ret) {
		strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
		ret = namebuf;
	}
	preempt_enable();

	return ret;
}
int lookup_module_symbol_name(unsigned long addr, char *symname)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			const char *sym;

			sym = get_ksymbol(mod, addr, NULL, NULL);
			if (!sym)
				goto out;
			strlcpy(symname, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}
int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
			       unsigned long *offset, char *modname, char *name)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			const char *sym;

			sym = get_ksymbol(mod, addr, size, offset);
			if (!sym)
				goto out;
			if (modname)
				strlcpy(modname, mod->name, MODULE_NAME_LEN);
			if (name)
				strlcpy(name, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		       char *name, char *module_name, int *exported)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (symnum < mod->num_symtab) {
			*value = mod->symtab[symnum].st_value;
			*type = mod->symtab[symnum].st_info;
			strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
				KSYM_NAME_LEN);
			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
			*exported = is_exported(name, *value, mod);
			preempt_enable();
			return 0;
		}
		symnum -= mod->num_symtab;
	}
	preempt_enable();
	return -ERANGE;
}
static unsigned long mod_find_symname(struct module *mod, const char *name)
{
	unsigned int i;

	for (i = 0; i < mod->num_symtab; i++)
		if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
		    mod->symtab[i].st_info != 'U')
			return mod->symtab[i].st_value;
	return 0;
}
/* Look for this name: can be of form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name)
{
	struct module *mod;
	char *colon;
	unsigned long ret = 0;

	/* Don't lock: we're in enough trouble already. */
	preempt_disable();
	if ((colon = strchr(name, ':')) != NULL) {
		if ((mod = find_module_all(name, colon - name, false)) != NULL)
			ret = mod_find_symname(mod, colon+1);
	} else {
		list_for_each_entry_rcu(mod, &modules, list) {
			if (mod->state == MODULE_STATE_UNFORMED)
				continue;
			if ((ret = mod_find_symname(mod, name)) != 0)
				break;
		}
	}
	preempt_enable();
	return ret;
}
int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
					     struct module *, unsigned long),
				   void *data)
{
	struct module *mod;
	unsigned int i;
	int ret;

	module_assert_mutex();

	list_for_each_entry(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		for (i = 0; i < mod->num_symtab; i++) {
			ret = fn(data, mod->strtab + mod->symtab[i].st_name,
				 mod, mod->symtab[i].st_value);
			if (ret != 0)
				return ret;
		}
	}
	return 0;
}
#endif /* CONFIG_KALLSYMS */
static char *module_flags(struct module *mod, char *buf)
{
	int bx = 0;

	BUG_ON(mod->state == MODULE_STATE_UNFORMED);
	if (mod->taints ||
	    mod->state == MODULE_STATE_GOING ||
	    mod->state == MODULE_STATE_COMING) {
		buf[bx++] = '(';
		bx += module_flags_taint(mod, buf + bx);
		/* Show a - for module-is-being-unloaded */
		if (mod->state == MODULE_STATE_GOING)
			buf[bx++] = '-';
		/* Show a + for module-is-being-loaded */
		if (mod->state == MODULE_STATE_COMING)
			buf[bx++] = '+';
		buf[bx++] = ')';
	}
	buf[bx] = '\0';

	return buf;
}
#ifdef CONFIG_PROC_FS
/* Called by the /proc file system to return a list of modules. */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&module_mutex);
	return seq_list_start(&modules, *pos);
}

static void *m_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &modules, pos);
}

static void m_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&module_mutex);
}

static int m_show(struct seq_file *m, void *p)
{
	struct module *mod = list_entry(p, struct module, list);
	char buf[8];

	/* We always ignore unformed modules. */
	if (mod->state == MODULE_STATE_UNFORMED)
		return 0;

	seq_printf(m, "%s %u",
		   mod->name, mod->init_size + mod->core_size);
	print_unload_info(m, mod);

	/* Informative for users. */
	seq_printf(m, " %s",
		   mod->state == MODULE_STATE_GOING ? "Unloading" :
		   mod->state == MODULE_STATE_COMING ? "Loading" :
		   "Live");
	/* Used by oprofile and other similar tools. */
	seq_printf(m, " 0x%pK", mod->module_core);

	/* Taints info */
	if (mod->taints)
		seq_printf(m, " %s", module_flags(mod, buf));

	seq_puts(m, "\n");
	return 0;
}
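/*
 * An illustrative /proc/modules line produced by m_show() (made-up values):
 *
 *   example_mod 16384 0 - Live 0xffffffffa0000000
 *
 * i.e. name and size, then the refcount and dependency list from
 * print_unload_info(), the state, the load address, and, only when the
 * module is tainted, the module_flags() string such as "(OE)".
 */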
/*
 * Format: modulename size refcount deps address
 *
 * Where refcount is a number or -, and deps is a comma-separated list
 * of depends or -.
 */
static const struct seq_operations modules_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show
};

static int modules_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &modules_op);
}

static const struct file_operations proc_modules_operations = {
	.open		= modules_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init proc_modules_init(void)
{
	proc_create("modules", 0, NULL, &proc_modules_operations);
	return 0;
}
module_init(proc_modules_init);
#endif
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (mod->num_exentries == 0)
			continue;

		e = search_extable(mod->extable,
				   mod->extable + mod->num_exentries - 1,
				   addr);
		if (e)
			break;
	}
	preempt_enable();

	/* Now, if we found one, we are running inside it now, hence
	   we cannot unload the module, hence no refcnt needed. */
	return e;
}
/**
 * is_module_address - is this address inside a module?
 * @addr: the address to check.
 *
 * See is_module_text_address() if you simply want to see if the address
 * is code (not data).
 */
bool is_module_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_address(addr) != NULL;
	preempt_enable();

	return ret;
}
/*
 * __module_address - get the module which contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_address(unsigned long addr)
{
	struct module *mod;

	if (addr < module_addr_min || addr > module_addr_max)
		return NULL;

	module_assert_mutex_or_preempt();

	mod = mod_find(addr);
	if (mod) {
		BUG_ON(!within_module(addr, mod));
		if (mod->state == MODULE_STATE_UNFORMED)
			mod = NULL;
	}
	return mod;
}
EXPORT_SYMBOL_GPL(__module_address);
/**
 * is_module_text_address - is this address inside module code?
 * @addr: the address to check.
 *
 * See is_module_address() if you simply want to see if the address is
 * anywhere in a module.  See kernel_text_address() for testing if an
 * address corresponds to kernel or module code.
 */
bool is_module_text_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_text_address(addr) != NULL;
	preempt_enable();

	return ret;
}
/*
 * __module_text_address - get the module whose code contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_text_address(unsigned long addr)
{
	struct module *mod = __module_address(addr);

	if (mod) {
		/* Make sure it's within the text section. */
		if (!within(addr, mod->module_init, mod->init_text_size)
		    && !within(addr, mod->module_core, mod->core_text_size))
			mod = NULL;
	}
	return mod;
}
EXPORT_SYMBOL_GPL(__module_text_address);
/* Don't grab lock, we're oopsing. */
void print_modules(void)
{
	struct module *mod;
	char buf[8];

	printk(KERN_DEFAULT "Modules linked in:");
	/* Most callers should already have preempt disabled, but make sure */
	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		pr_cont(" %s%s", mod->name, module_flags(mod, buf));
	}
	preempt_enable();
	if (last_unloaded_module[0])
		pr_cont(" [last unloaded: %s]", last_unloaded_module);
	pr_cont("\n");
}
#ifdef CONFIG_MODVERSIONS
/* Generate the signature for all relevant module structures here.
 * If these change, we don't want to try to parse the module. */
void module_layout(struct module *mod,
		   struct modversion_info *ver,
		   struct kernel_param *kp,
		   struct kernel_symbol *ks,
		   struct tracepoint * const *tp)
{
}
EXPORT_SYMBOL(module_layout);
#endif