/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/ftrace_event.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/fips.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
 */
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif

/*
 * Given BASE and SIZE this macro calculates the number of pages the
 * memory region occupies
 */
#define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ?		\
		(PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) -	\
			 PFN_DOWN((unsigned long)BASE) + 1)	\
		: (0UL))
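
/*
 * Illustrative arithmetic: with 4 KiB pages, BASE = 0x1000ff8 and
 * SIZE = 16 straddle a page boundary, so PFN_DOWN(0x1001007) -
 * PFN_DOWN(0x1000ff8) + 1 == 2 pages, while SIZE == 0 short-circuits
 * to 0 pages.
 */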

/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete uses stop_machine/add uses RCU list operations). */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */

#ifdef CONFIG_MODULE_SIG
#ifdef CONFIG_MODULE_SIG_FORCE
static bool sig_enforce = true;
#else
static bool sig_enforce = false;

static int param_set_bool_enable_only(const char *val,
				      const struct kernel_param *kp)
{
	int err;
	bool test;
	struct kernel_param dummy_kp = *kp;

	dummy_kp.arg = &test;

	err = param_set_bool(val, &dummy_kp);
	if (err)
		return err;

	/* Don't let them unset it once it's set! */
	if (!test && sig_enforce)
		return -EROFS;

	if (test)
		sig_enforce = true;
	return 0;
}

static const struct kernel_param_ops param_ops_bool_enable_only = {
	.flags = KERNEL_PARAM_FL_NOARG,
	.set = param_set_bool_enable_only,
	.get = param_get_bool,
};
#define param_check_bool_enable_only param_check_bool

module_param(sig_enforce, bool_enable_only, 0644);
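
/*
 * Usage sketch (one-way semantics, assuming the usual built-in "module"
 * parameter namespace): "module.sig_enforce=1" on the kernel command
 * line, or writing 1 to /sys/module/module/parameters/sig_enforce,
 * turns enforcement on; the ops above make any later attempt to write
 * 0 fail with -EROFS.
 */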
#endif /* !CONFIG_MODULE_SIG_FORCE */
#endif /* CONFIG_MODULE_SIG */

/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

/* Bounds of module allocation, for speeding __module_address.
 * Protected by module_mutex. */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);

struct load_info {
	Elf_Ehdr *hdr;
	unsigned long len;
	Elf_Shdr *sechdrs;
	char *secstrings, *strtab;
	unsigned long symoffs, stroffs;
	struct _ddebug *debug;
	unsigned int num_debug;
	bool sig_ok;
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;
};

/* We require a truly strong try_module_get(): 0 means failure due to
   ongoing or failed initialization etc. */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{
	add_taint(flag, lockdep_ok);
	mod->taints |= (1U << flag);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);
	do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
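
/*
 * Usage sketch, mirroring how the loader pulls typed arrays out of
 * named ELF sections later in this file:
 *
 *	mod->kp = section_objs(info, "__param",
 *			       sizeof(*mod->kp), &mod->num_kp);
 *
 * A missing section degrades gracefully: find_sec() returns 0, and
 * section 0 has sh_addr == 0 and sh_size == 0, so the caller sees NULL
 * and a count of zero.
 */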

/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab[];
extern const unsigned long __start___kcrctab_gpl[];
extern const unsigned long __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      void *data),
				   void *data)
{
	unsigned int j;

	for (j = 0; j < arrsize; j++) {
		if (fn(&arr[j], owner, data))
			return true;
	}

	return false;
}

/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
				    struct module *owner,
				    void *data),
			 void *data)
{
	struct module *mod;
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif
	};

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	list_for_each_entry_rcu(mod, &modules, list) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif
		};

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(each_symbol_section);

struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};

static bool check_symbol(const struct symsearch *syms,
			 struct module *owner,
			 unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			pr_warn("Symbol %s is being used by a non-GPL module, "
				"which will not be allowed in the future\n",
				fsa->name);
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		pr_warn("Symbol %s is marked as UNUSED, however this module is "
			"using it.\n", fsa->name);
		pr_warn("This symbol will go away in the future.\n");
		pr_warn("Please evaluate if this is the right api to use and "
			"if it really is, submit a report to the linux kernel "
			"mailing list together with submitting your code for "
			"inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}

static int cmp_name(const void *va, const void *vb)
{
	const char *a;
	const struct kernel_symbol *b;
	a = va; b = vb;
	return strcmp(a, b->name);
}

static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   void *data)
{
	struct find_symbol_arg *fsa = data;
	struct kernel_symbol *sym;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
		      sizeof(struct kernel_symbol), cmp_name);

	if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
		return true;

	return false;
}

/* Find a symbol and return it, along with (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const unsigned long **crc,
					bool gplok,
					bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol_section(find_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	pr_debug("Failed to find symbol %s\n", name);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);

/* Search for module by name: must hold module_mutex. */
static struct module *find_module_all(const char *name, size_t len,
				      bool even_unformed)
{
	struct module *mod;

	list_for_each_entry(mod, &modules, list) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
			return mod;
	}
	return NULL;
}

struct module *find_module(const char *name)
{
	return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)
		return 0;

	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);

			if ((void *)addr >= start &&
			    (void *)addr < start + mod->percpu_size) {
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);

static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	mod->refptr = alloc_percpu(struct module_ref);
	if (!mod->refptr)
		return -ENOMEM;

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	__this_cpu_write(mod->refptr->incs, 1);

	return 0;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a) {
			pr_debug("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		pr_warn("%s: out of memory loading\n", a->name);
		return -ENOMEM;
	}

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}

/* Module a uses b: caller must hold module_mutex */
int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ref_module);

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);

	free_percpu(mod->refptr);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

struct stopref
{
	struct module *mod;
	int flags;
	int *forced;
};

/* Whole machine is stopped with interrupts off when this runs. */
static int __try_stop_module(void *_sref)
{
	struct stopref *sref = _sref;

	/* If it's not unused, quit unless we're forcing. */
	if (module_refcount(sref->mod) != 0) {
		if (!(*sref->forced = try_force_unload(sref->flags)))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	sref->mod->state = MODULE_STATE_GOING;
	return 0;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	struct stopref sref = { mod, flags, forced };

	return stop_machine(__try_stop_module, &sref, NULL);
}

unsigned long module_refcount(struct module *mod)
{
	unsigned long incs = 0, decs = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		decs += per_cpu_ptr(mod->refptr, cpu)->decs;
	/*
	 * ensure the incs are added up after the decs.
	 * module_put ensures incs are visible before decs with smp_wmb.
	 *
	 * This 2-count scheme avoids the situation where the refcount
	 * for CPU0 is read, then CPU0 increments the module refcount,
	 * then CPU1 drops that refcount, then the refcount for CPU1 is
	 * read. We would record a decrement but not its corresponding
	 * increment so we would see a low count (disaster).
	 *
	 * Rare situation? But module_refcount can be preempted, and we
	 * might be tallying up 4096+ CPUs. So it is not impossible.
	 */
	smp_rmb();
	for_each_possible_cpu(cpu)
		incs += per_cpu_ptr(mod->refptr, cpu)->incs;
	return incs - decs;
}
EXPORT_SYMBOL(module_refcount);

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	if (!(flags & O_NONBLOCK))
		pr_warn("waiting module removal not supported: please upgrade\n");

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}
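
/*
 * Caller's view: rmmod issues delete_module(name, O_NONBLOCK), and
 * "rmmod -f" adds O_TRUNC, which try_force_unload() above turns into a
 * TAINT_FORCED_RMMOD forced removal when CONFIG_MODULE_FORCE_UNLOAD is
 * set. The flag values are borrowed fcntl constants; they have nothing
 * to do with files here.
 */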

static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	struct module_use *use;
	int printed_something = 0;

	seq_printf(m, " %lu ", module_refcount(mod));

	/* Always include a trailing , so userspace can differentiate
	   between this and the old multi-field proc format. */
	list_for_each_entry(use, &mod->source_list, source_list) {
		printed_something = 1;
		seq_printf(m, "%s,", use->source->name);
	}

	if (mod->init != NULL && mod->exit == NULL) {
		printed_something = 1;
		seq_printf(m, "[permanent],");
	}

	if (!printed_something)
		seq_printf(m, "-");
}

void __symbol_put(const char *symbol)
{
	struct module *owner;

	preempt_disable();
	if (!find_symbol(symbol, &owner, NULL, true, false))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/* module_text_address is safe here: we're supposed to have reference
	 * to module from symbol_get, so it can't go away. */
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%lu\n", module_refcount(mk->mod));
}

static struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
	if (module) {
		preempt_disable();
		__this_cpu_inc(module->refptr->incs);
		trace_module_get(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		preempt_disable();

		if (likely(module_is_live(module))) {
			__this_cpu_inc(module->refptr->incs);
			trace_module_get(module, _RET_IP_);
		} else
			ret = false;

		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);

void module_put(struct module *module)
{
	if (module) {
		preempt_disable();
		smp_wmb(); /* see comment in module_refcount */
		__this_cpu_inc(module->refptr->decs);

		trace_module_put(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);

#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using it. */
	seq_printf(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(ref_module);

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */

static size_t module_flags_taint(struct module *mod, char *buf)
{
	size_t l = 0;

	if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
		buf[l++] = 'P';
	if (mod->taints & (1 << TAINT_OOT_MODULE))
		buf[l++] = 'O';
	if (mod->taints & (1 << TAINT_FORCED_MODULE))
		buf[l++] = 'F';
	if (mod->taints & (1 << TAINT_CRAP))
		buf[l++] = 'C';
	/*
	 * TAINT_FORCED_RMMOD: could be added.
	 * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
	 * apply to modules.
	 */
	return l;
}

static ssize_t show_initstate(struct module_attribute *mattr,
			      struct module_kobject *mk, char *buffer)
{
	const char *state = "unknown";

	switch (mk->mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	default:
		BUG();
	}
	return sprintf(buffer, "%s\n", state);
}

static struct module_attribute modinfo_initstate =
	__ATTR(initstate, 0444, show_initstate, NULL);

static ssize_t store_uevent(struct module_attribute *mattr,
			    struct module_kobject *mk,
			    const char *buffer, size_t count)
{
	enum kobject_action action;

	if (kobject_action_type(buffer, count, &action) == 0)
		kobject_uevent(&mk->kobj, action);
	return count;
}

struct module_attribute module_uevent =
	__ATTR(uevent, 0200, NULL, store_uevent);

static ssize_t show_coresize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->core_size);
}

static struct module_attribute modinfo_coresize =
	__ATTR(coresize, 0444, show_coresize, NULL);

static ssize_t show_initsize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->init_size);
}

static struct module_attribute modinfo_initsize =
	__ATTR(initsize, 0444, show_initsize, NULL);

static ssize_t show_taint(struct module_attribute *mattr,
			  struct module_kobject *mk, char *buffer)
{
	size_t l;

	l = module_flags_taint(mk->mod, buffer);
	buffer[l++] = '\n';
	return l;
}

static struct module_attribute modinfo_taint =
	__ATTR(taint, 0444, show_taint, NULL);

static struct module_attribute *modinfo_attrs[] = {
	&module_uevent,
	&modinfo_version,
	&modinfo_srcversion,
	&modinfo_initstate,
	&modinfo_coresize,
	&modinfo_initsize,
	&modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
	&modinfo_refcnt,
#endif
	NULL,
};

static const char vermagic[] = VERMAGIC_STRING;

static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
	return 0;
#else
	return -ENOEXEC;
#endif
}

#ifdef CONFIG_MODVERSIONS
/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
static unsigned long maybe_relocated(unsigned long crc,
				     const struct module *crc_owner)
{
#ifdef ARCH_RELOCATES_KCRCTAB
	if (crc_owner == NULL)
		return crc - (unsigned long)reloc_start;
#endif
	return crc;
}

static int check_version(Elf_Shdr *sechdrs,
			 unsigned int versindex,
			 const char *symname,
			 struct module *mod,
			 const unsigned long *crc,
			 const struct module *crc_owner)
{
	unsigned int i, num_versions;
	struct modversion_info *versions;

	/* Exporting module didn't supply crcs?  OK, we're already tainted. */
	if (!crc)
		return 1;

	/* No versions at all?  modprobe --force does this. */
	if (versindex == 0)
		return try_to_force_load(mod, symname) == 0;

	versions = (void *) sechdrs[versindex].sh_addr;
	num_versions = sechdrs[versindex].sh_size
		/ sizeof(struct modversion_info);

	for (i = 0; i < num_versions; i++) {
		if (strcmp(versions[i].name, symname) != 0)
			continue;

		if (versions[i].crc == maybe_relocated(*crc, crc_owner))
			return 1;
		pr_debug("Found checksum %lX vs module %lX\n",
			 maybe_relocated(*crc, crc_owner), versions[i].crc);
		goto bad_version;
	}

	pr_warn("%s: no symbol version for %s\n", mod->name, symname);
	return 0;

bad_version:
	printk("%s: disagrees about version of symbol %s\n",
	       mod->name, symname);
	return 0;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	const unsigned long *crc;

	/* Since this should be found in kernel (which can't be removed),
	 * no locking is necessary. */
	if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
			 &crc, true, false))
		BUG();
	return check_version(sechdrs, versindex,
			     VMLINUX_SYMBOL_STR(module_layout), mod, crc,
			     NULL);
}

/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	if (has_crcs) {
		amagic += strcspn(amagic, " ");
		bmagic += strcspn(bmagic, " ");
	}
	return strcmp(amagic, bmagic) == 0;
}
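
/*
 * Illustrative vermagic string (exact contents depend on the build):
 *
 *	"3.13.0-24-generic SMP mod_unload modversions "
 *
 * With CRCs available, strcspn() skips the leading release token so only
 * the feature flags after the first space must match; without CRCs the
 * whole string, kernel release included, has to compare equal.
 */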
#else
static inline int check_version(Elf_Shdr *sechdrs,
				unsigned int versindex,
				const char *symname,
				struct module *mod,
				const unsigned long *crc,
				const struct module *crc_owner)
{
	return 1;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */

/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
						  const char *name,
						  char ownername[])
{
	struct module *owner;
	const struct kernel_symbol *sym;
	const unsigned long *crc;
	int err;

	mutex_lock(&module_mutex);
	sym = find_symbol(name, &owner, &crc,
			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
	if (!sym)
		goto unlock;

	if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
			   owner)) {
		sym = ERR_PTR(-EINVAL);
		goto getname;
	}

	err = ref_module(mod, owner);
	if (err) {
		sym = ERR_PTR(err);
		goto getname;
	}

getname:
	/* We must make a copy under the lock if we failed to get ref. */
	strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
unlock:
	mutex_unlock(&module_mutex);
	return sym;
}

static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
		    const char *name)
{
	const struct kernel_symbol *ksym;
	char owner[MODULE_NAME_LEN];

	if (wait_event_interruptible_timeout(module_wq,
			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
			|| PTR_ERR(ksym) != -EBUSY,
			30 * HZ) <= 0) {
		pr_warn("%s: gave up waiting for init of module %s.\n",
			mod->name, owner);
	}
	return ksym;
}

/*
 * /sys/module/foo/sections stuff
 * J. Corbet <corbet@lwn.net>
 */
#ifdef CONFIG_SYSFS

#ifdef CONFIG_KALLSYMS
static inline bool sect_empty(const Elf_Shdr *sect)
{
	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}

struct module_sect_attr
{
	struct module_attribute mattr;
	char *name;
	unsigned long address;
};

struct module_sect_attrs
{
	struct attribute_group grp;
	unsigned int nsections;
	struct module_sect_attr attrs[0];
};

static ssize_t module_sect_show(struct module_attribute *mattr,
				struct module_kobject *mk, char *buf)
{
	struct module_sect_attr *sattr =
		container_of(mattr, struct module_sect_attr, mattr);
	return sprintf(buf, "0x%pK\n", (void *)sattr->address);
}

static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
{
	unsigned int section;

	for (section = 0; section < sect_attrs->nsections; section++)
		kfree(sect_attrs->attrs[section].name);
	kfree(sect_attrs);
}

static void add_sect_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int nloaded = 0, i, size[2];
	struct module_sect_attrs *sect_attrs;
	struct module_sect_attr *sattr;
	struct attribute **gattr;

	/* Count loaded sections and allocate structures */
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]))
			nloaded++;
	size[0] = ALIGN(sizeof(*sect_attrs)
			+ nloaded * sizeof(sect_attrs->attrs[0]),
			sizeof(sect_attrs->grp.attrs[0]));
	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
	if (sect_attrs == NULL)
		return;

	/* Setup section attributes. */
	sect_attrs->grp.name = "sections";
	sect_attrs->grp.attrs = (void *)sect_attrs + size[0];

	sect_attrs->nsections = 0;
	sattr = &sect_attrs->attrs[0];
	gattr = &sect_attrs->grp.attrs[0];
	for (i = 0; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *sec = &info->sechdrs[i];
		if (sect_empty(sec))
			continue;
		sattr->address = sec->sh_addr;
		sattr->name = kstrdup(info->secstrings + sec->sh_name,
				      GFP_KERNEL);
		if (sattr->name == NULL)
			goto out;
		sect_attrs->nsections++;
		sysfs_attr_init(&sattr->mattr.attr);
		sattr->mattr.show = module_sect_show;
		sattr->mattr.store = NULL;
		sattr->mattr.attr.name = sattr->name;
		sattr->mattr.attr.mode = S_IRUGO;
		*(gattr++) = &(sattr++)->mattr.attr;
	}
	*gattr = NULL;

	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
		goto out;

	mod->sect_attrs = sect_attrs;
	return;
out:
	free_sect_attrs(sect_attrs);
}

static void remove_sect_attrs(struct module *mod)
{
	if (mod->sect_attrs) {
		sysfs_remove_group(&mod->mkobj.kobj,
				   &mod->sect_attrs->grp);
		/* We are positive that no one is using any sect attrs
		 * at this point.  Deallocate immediately. */
		free_sect_attrs(mod->sect_attrs);
		mod->sect_attrs = NULL;
	}
}

/*
 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
 */

struct module_notes_attrs {
	struct kobject *dir;
	unsigned int notes;
	struct bin_attribute attrs[0];
};

static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t pos, size_t count)
{
	/*
	 * The caller checked the pos and count against our size.
	 */
	memcpy(buf, bin_attr->private + pos, count);
	return count;
}

static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
			     unsigned int i)
{
	if (notes_attrs->dir) {
		while (i-- > 0)
			sysfs_remove_bin_file(notes_attrs->dir,
					      &notes_attrs->attrs[i]);
		kobject_put(notes_attrs->dir);
	}
	kfree(notes_attrs);
}

static void add_notes_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int notes, loaded, i;
	struct module_notes_attrs *notes_attrs;
	struct bin_attribute *nattr;

	/* failed to create section attributes, so can't create notes */
	if (!mod->sect_attrs)
		return;

	/* Count notes sections and allocate structures.  */
	notes = 0;
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]) &&
		    (info->sechdrs[i].sh_type == SHT_NOTE))
			++notes;

	if (notes == 0)
		return;

	notes_attrs = kzalloc(sizeof(*notes_attrs)
			      + notes * sizeof(notes_attrs->attrs[0]),
			      GFP_KERNEL);
	if (notes_attrs == NULL)
		return;

	notes_attrs->notes = notes;
	nattr = &notes_attrs->attrs[0];
	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
		if (sect_empty(&info->sechdrs[i]))
			continue;
		if (info->sechdrs[i].sh_type == SHT_NOTE) {
			sysfs_bin_attr_init(nattr);
			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
			nattr->attr.mode = S_IRUGO;
			nattr->size = info->sechdrs[i].sh_size;
			nattr->private = (void *) info->sechdrs[i].sh_addr;
			nattr->read = module_notes_read;
			++nattr;
		}
		++loaded;
	}

	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
	if (!notes_attrs->dir)
		goto out;

	for (i = 0; i < notes; ++i)
		if (sysfs_create_bin_file(notes_attrs->dir,
					  &notes_attrs->attrs[i]))
			goto out;

	mod->notes_attrs = notes_attrs;
	return;

out:
	free_notes_attrs(notes_attrs, i);
}

static void remove_notes_attrs(struct module *mod)
{
	if (mod->notes_attrs)
		free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
}

#else

static inline void add_sect_attrs(struct module *mod,
				  const struct load_info *info)
{
}

static inline void remove_sect_attrs(struct module *mod)
{
}

static inline void add_notes_attrs(struct module *mod,
				   const struct load_info *info)
{
}

static inline void remove_notes_attrs(struct module *mod)
{
}
#endif /* CONFIG_KALLSYMS */

static void add_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;
	int nowarn;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list) {
		nowarn = sysfs_create_link(use->target->holders_dir,
					   &mod->mkobj.kobj, mod->name);
	}
	mutex_unlock(&module_mutex);
#endif
}

static void del_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list)
		sysfs_remove_link(use->target->holders_dir, mod->name);
	mutex_unlock(&module_mutex);
#endif
}

static int module_add_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	struct module_attribute *temp_attr;
	int error = 0;
	int i;

	mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
				      (ARRAY_SIZE(modinfo_attrs) + 1)),
				     GFP_KERNEL);
	if (!mod->modinfo_attrs)
		return -ENOMEM;

	temp_attr = mod->modinfo_attrs;
	for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
		if (!attr->test ||
		    (attr->test && attr->test(mod))) {
			memcpy(temp_attr, attr, sizeof(*temp_attr));
			sysfs_attr_init(&temp_attr->attr);
			error = sysfs_create_file(&mod->mkobj.kobj, &temp_attr->attr);
			++temp_attr;
		}
	}
	return error;
}

static void module_remove_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
		/* pick a field to test for end of list */
		if (!attr->attr.name)
			break;
		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
		if (attr->free)
			attr->free(mod);
	}
	kfree(mod->modinfo_attrs);
}

static void mod_kobject_put(struct module *mod)
{
	DECLARE_COMPLETION_ONSTACK(c);
	mod->mkobj.kobj_completion = &c;
	kobject_put(&mod->mkobj.kobj);
	wait_for_completion(&c);
}

static int mod_sysfs_init(struct module *mod)
{
	int err;
	struct kobject *kobj;

	if (!module_sysfs_initialized) {
		pr_err("%s: module sysfs not initialized\n", mod->name);
		err = -EINVAL;
		goto out;
	}

	kobj = kset_find_obj(module_kset, mod->name);
	if (kobj) {
		pr_err("%s: module is already loaded\n", mod->name);
		kobject_put(kobj);
		err = -EINVAL;
		goto out;
	}

	mod->mkobj.mod = mod;

	memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
	mod->mkobj.kobj.kset = module_kset;
	err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
				   "%s", mod->name);
	if (err)
		mod_kobject_put(mod);

	/* delay uevent until full sysfs population */
out:
	return err;
}

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	int err;

	err = mod_sysfs_init(mod);
	if (err)
		goto out;

	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
	if (!mod->holders_dir) {
		err = -ENOMEM;
		goto out_unreg;
	}

	err = module_param_sysfs_setup(mod, kparam, num_params);
	if (err)
		goto out_unreg_holders;

	err = module_add_modinfo_attrs(mod);
	if (err)
		goto out_unreg_param;

	add_usage_links(mod);
	add_sect_attrs(mod, info);
	add_notes_attrs(mod, info);

	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
	return 0;

out_unreg_param:
	module_param_sysfs_remove(mod);
out_unreg_holders:
	kobject_put(mod->holders_dir);
out_unreg:
	mod_kobject_put(mod);
out:
	return err;
}

static void mod_sysfs_fini(struct module *mod)
{
	remove_notes_attrs(mod);
	remove_sect_attrs(mod);
	mod_kobject_put(mod);
}

#else /* !CONFIG_SYSFS */

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	return 0;
}

static void mod_sysfs_fini(struct module *mod)
{
}

static void module_remove_modinfo_attrs(struct module *mod)
{
}

static void del_usage_links(struct module *mod)
{
}

#endif /* CONFIG_SYSFS */

static void mod_sysfs_teardown(struct module *mod)
{
	del_usage_links(mod);
	module_remove_modinfo_attrs(mod);
	module_param_sysfs_remove(mod);
	kobject_put(mod->mkobj.drivers_dir);
	kobject_put(mod->holders_dir);
	mod_sysfs_fini(mod);
}

/*
 * unlink the module while the whole machine is stopped with interrupts off
 *  - this defends against kallsyms not taking locks
 */
static int __unlink_module(void *_mod)
{
	struct module *mod = _mod;
	list_del(&mod->list);
	module_bug_cleanup(mod);
	return 0;
}

#ifdef CONFIG_DEBUG_SET_MODULE_RONX
/*
 * LKM RO/NX protection: protect module's text/ro-data
 * from modification and any data from execution.
 */
void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages))
{
	unsigned long begin_pfn = PFN_DOWN((unsigned long)start);
	unsigned long end_pfn = PFN_DOWN((unsigned long)end);

	if (end_pfn > begin_pfn)
		set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
}
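
/*
 * Worked example with 4 KiB pages: for a page-aligned start and
 * end = start + 5000, begin_pfn lands on page N and end_pfn on page
 * N + 1, so exactly one full page is changed and the trailing partial
 * page is left alone. Rounding the (exclusive) end down is what keeps a
 * permission change from spilling into whatever shares the last page.
 */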

static void set_section_ro_nx(void *base,
			      unsigned long text_size,
			      unsigned long ro_size,
			      unsigned long total_size)
{
	/* begin and end PFNs of the current subsection */
	unsigned long begin_pfn;
	unsigned long end_pfn;

	/*
	 * Set RO for module text and RO-data:
	 * - Always protect first page.
	 * - Do not protect last partial page.
	 */
	if (ro_size > 0)
		set_page_attributes(base, base + ro_size, set_memory_ro);

	/*
	 * Set NX permissions for module data:
	 * - Do not protect first partial page.
	 * - Always protect last page.
	 */
	if (total_size > text_size) {
		begin_pfn = PFN_UP((unsigned long)base + text_size);
		end_pfn = PFN_UP((unsigned long)base + total_size);
		if (end_pfn > begin_pfn)
			set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
	}
}

static void unset_module_core_ro_nx(struct module *mod)
{
	set_page_attributes(mod->module_core + mod->core_text_size,
			    mod->module_core + mod->core_size,
			    set_memory_x);
	set_page_attributes(mod->module_core,
			    mod->module_core + mod->core_ro_size,
			    set_memory_rw);
}

static void unset_module_init_ro_nx(struct module *mod)
{
	set_page_attributes(mod->module_init + mod->init_text_size,
			    mod->module_init + mod->init_size,
			    set_memory_x);
	set_page_attributes(mod->module_init,
			    mod->module_init + mod->init_ro_size,
			    set_memory_rw);
}

/* Iterate through all modules and set each module's text as RW */
void set_all_modules_text_rw(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if ((mod->module_core) && (mod->core_text_size)) {
			set_page_attributes(mod->module_core,
					    mod->module_core + mod->core_text_size,
					    set_memory_rw);
		}
		if ((mod->module_init) && (mod->init_text_size)) {
			set_page_attributes(mod->module_init,
					    mod->module_init + mod->init_text_size,
					    set_memory_rw);
		}
	}
	mutex_unlock(&module_mutex);
}

/* Iterate through all modules and set each module's text as RO */
void set_all_modules_text_ro(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if ((mod->module_core) && (mod->core_text_size)) {
			set_page_attributes(mod->module_core,
					    mod->module_core + mod->core_text_size,
					    set_memory_ro);
		}
		if ((mod->module_init) && (mod->init_text_size)) {
			set_page_attributes(mod->module_init,
					    mod->module_init + mod->init_text_size,
					    set_memory_ro);
		}
	}
	mutex_unlock(&module_mutex);
}
#else
static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
static void unset_module_core_ro_nx(struct module *mod) { }
static void unset_module_init_ro_nx(struct module *mod) { }
#endif

void __weak module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
}

void __weak module_arch_cleanup(struct module *mod)
{
}

/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
	trace_module_free(mod);

	mod_sysfs_teardown(mod);

	/* We leave it in list to prevent duplicate loads, but make sure
	 * that no one uses it while it's being deconstructed. */
	mod->state = MODULE_STATE_UNFORMED;
1848
1849 /* Remove dynamic debug info */
1850 ddebug_remove_module(mod->name);
1851
1852 /* Arch-specific cleanup. */
1853 module_arch_cleanup(mod);
1854
1855 /* Module unload stuff */
1856 module_unload_free(mod);
1857
1858 /* Free any allocated parameters. */
1859 destroy_params(mod->kp, mod->num_kp);
1860
1861 /* Now we can delete it from the lists */
1862 mutex_lock(&module_mutex);
1863 stop_machine(__unlink_module, mod, NULL);
1864 mutex_unlock(&module_mutex);
1865
1866 /* This may be NULL, but that's OK */
1867 unset_module_init_ro_nx(mod);
1868 module_free(mod, mod->module_init);
1869 kfree(mod->args);
1870 percpu_modfree(mod);
1871
1872 /* Free lock-classes: */
1873 lockdep_free_key_range(mod->module_core, mod->core_size);
1874
1875 /* Finally, free the core (containing the module structure) */
1876 unset_module_core_ro_nx(mod);
1877 module_free(mod, mod->module_core);
1878
1879 #ifdef CONFIG_MPU
1880 update_protections(current->mm);
1881 #endif
1882 }
1883
1884 void *__symbol_get(const char *symbol)
1885 {
1886 struct module *owner;
1887 const struct kernel_symbol *sym;
1888
1889 preempt_disable();
1890 sym = find_symbol(symbol, &owner, NULL, true, true);
1891 if (sym && strong_try_module_get(owner))
1892 sym = NULL;
1893 preempt_enable();
1894
1895 return sym ? (void *)sym->value : NULL;
1896 }
1897 EXPORT_SYMBOL_GPL(__symbol_get);
1898
1899 /*
1900 * Ensure that an exported symbol [global namespace] does not already exist
1901 * in the kernel or in some other module's exported symbol table.
1902 *
1903 * You must hold the module_mutex.
1904 */
1905 static int verify_export_symbols(struct module *mod)
1906 {
1907 unsigned int i;
1908 struct module *owner;
1909 const struct kernel_symbol *s;
1910 struct {
1911 const struct kernel_symbol *sym;
1912 unsigned int num;
1913 } arr[] = {
1914 { mod->syms, mod->num_syms },
1915 { mod->gpl_syms, mod->num_gpl_syms },
1916 { mod->gpl_future_syms, mod->num_gpl_future_syms },
1917 #ifdef CONFIG_UNUSED_SYMBOLS
1918 { mod->unused_syms, mod->num_unused_syms },
1919 { mod->unused_gpl_syms, mod->num_unused_gpl_syms },
1920 #endif
1921 };
1922
1923 for (i = 0; i < ARRAY_SIZE(arr); i++) {
1924 for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
1925 if (find_symbol(s->name, &owner, NULL, true, false)) {
1926 pr_err("%s: exports duplicate symbol %s"
1927 " (owned by %s)\n",
1928 mod->name, s->name, module_name(owner));
1929 return -ENOEXEC;
1930 }
1931 }
1932 }
1933 return 0;
1934 }
1935
1936 /* Change all symbols so that st_value encodes the pointer directly. */
1937 static int simplify_symbols(struct module *mod, const struct load_info *info)
1938 {
1939 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
1940 Elf_Sym *sym = (void *)symsec->sh_addr;
1941 unsigned long secbase;
1942 unsigned int i;
1943 int ret = 0;
1944 const struct kernel_symbol *ksym;
1945
1946 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
1947 const char *name = info->strtab + sym[i].st_name;
1948
1949 switch (sym[i].st_shndx) {
1950 case SHN_COMMON:
1951 /* Ignore common symbols */
1952 if (!strncmp(name, "__gnu_lto", 9))
1953 break;
1954
1955 /* We compiled with -fno-common. These are not
1956 supposed to happen. */
1957 pr_debug("Common symbol: %s\n", name);
1958 printk("%s: please compile with -fno-common\n",
1959 mod->name);
1960 ret = -ENOEXEC;
1961 break;
1962
1963 case SHN_ABS:
1964 /* Don't need to do anything */
1965 pr_debug("Absolute symbol: 0x%08lx\n",
1966 (long)sym[i].st_value);
1967 break;
1968
1969 case SHN_UNDEF:
1970 ksym = resolve_symbol_wait(mod, info, name);
1971 /* Ok if resolved. */
1972 if (ksym && !IS_ERR(ksym)) {
1973 sym[i].st_value = ksym->value;
1974 break;
1975 }
1976
1977 /* Ok if weak. */
1978 if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
1979 break;
1980
1981 pr_warn("%s: Unknown symbol %s (err %li)\n",
1982 mod->name, name, PTR_ERR(ksym));
1983 ret = PTR_ERR(ksym) ?: -ENOENT;
1984 break;
1985
1986 default:
1987 /* Divert to percpu allocation if a percpu var. */
1988 if (sym[i].st_shndx == info->index.pcpu)
1989 secbase = (unsigned long)mod_percpu(mod);
1990 else
1991 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
1992 sym[i].st_value += secbase;
1993 break;
1994 }
1995 }
1996
1997 return ret;
1998 }
1999
2000 static int apply_relocations(struct module *mod, const struct load_info *info)
2001 {
2002 unsigned int i;
2003 int err = 0;
2004
2005 /* Now do relocations. */
2006 for (i = 1; i < info->hdr->e_shnum; i++) {
2007 unsigned int infosec = info->sechdrs[i].sh_info;
2008
2009 /* Not a valid relocation section? */
2010 if (infosec >= info->hdr->e_shnum)
2011 continue;
2012
2013 /* Don't bother with non-allocated sections */
2014 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
2015 continue;
2016
2017 if (info->sechdrs[i].sh_type == SHT_REL)
2018 err = apply_relocate(info->sechdrs, info->strtab,
2019 info->index.sym, i, mod);
2020 else if (info->sechdrs[i].sh_type == SHT_RELA)
2021 err = apply_relocate_add(info->sechdrs, info->strtab,
2022 info->index.sym, i, mod);
2023 if (err < 0)
2024 break;
2025 }
2026 return err;
2027 }
2028
2029 /* Additional bytes needed by arch in front of individual sections */
2030 unsigned int __weak arch_mod_section_prepend(struct module *mod,
2031 unsigned int section)
2032 {
2033 /* default implementation just returns zero */
2034 return 0;
2035 }
2036
2037 /* Update size with this section: return offset. */
2038 static long get_offset(struct module *mod, unsigned int *size,
2039 Elf_Shdr *sechdr, unsigned int section)
2040 {
2041 long ret;
2042
2043 *size += arch_mod_section_prepend(mod, section);
2044 ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
2045 *size = ret + sechdr->sh_size;
2046 return ret;
2047 }
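/*
 * A worked example of the bookkeeping above (values hypothetical):
 * with *size == 10, sh_addralign == 8 and no arch prepend, the section
 * is placed at ALIGN(10, 8) == 16, so get_offset() returns 16 and
 * updates *size to 16 + sh_size. Repeated calls therefore pack the
 * sections one after another at their required alignment.
 */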
2048
2049 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2050 might -- code, read-only data, read-write data, small data. Tally
2051 sizes, and place the offsets into sh_entsize fields: high bit means it
2052 belongs in init. */
2053 static void layout_sections(struct module *mod, struct load_info *info)
2054 {
2055 static unsigned long const masks[][2] = {
2056 /* NOTE: all executable code must be the first section
2057 * in this array; otherwise modify the text_size
2058 * finder in the two loops below */
2059 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
2060 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
2061 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
2062 { ARCH_SHF_SMALL | SHF_ALLOC, 0 }
2063 };
2064 unsigned int m, i;
2065
2066 for (i = 0; i < info->hdr->e_shnum; i++)
2067 info->sechdrs[i].sh_entsize = ~0UL;
2068
2069 pr_debug("Core section allocation order:\n");
2070 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2071 for (i = 0; i < info->hdr->e_shnum; ++i) {
2072 Elf_Shdr *s = &info->sechdrs[i];
2073 const char *sname = info->secstrings + s->sh_name;
2074
2075 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2076 || (s->sh_flags & masks[m][1])
2077 || s->sh_entsize != ~0UL
2078 || strstarts(sname, ".init"))
2079 continue;
2080 s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
2081 pr_debug("\t%s\n", sname);
2082 }
2083 switch (m) {
2084 case 0: /* executable */
2085 mod->core_size = debug_align(mod->core_size);
2086 mod->core_text_size = mod->core_size;
2087 break;
2088 case 1: /* RO: text and ro-data */
2089 mod->core_size = debug_align(mod->core_size);
2090 mod->core_ro_size = mod->core_size;
2091 break;
2092 case 3: /* whole core */
2093 mod->core_size = debug_align(mod->core_size);
2094 break;
2095 }
2096 }
2097
2098 pr_debug("Init section allocation order:\n");
2099 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2100 for (i = 0; i < info->hdr->e_shnum; ++i) {
2101 Elf_Shdr *s = &info->sechdrs[i];
2102 const char *sname = info->secstrings + s->sh_name;
2103
2104 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2105 || (s->sh_flags & masks[m][1])
2106 || s->sh_entsize != ~0UL
2107 || !strstarts(sname, ".init"))
2108 continue;
2109 s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
2110 | INIT_OFFSET_MASK);
2111 pr_debug("\t%s\n", sname);
2112 }
2113 switch (m) {
2114 case 0: /* executable */
2115 mod->init_size = debug_align(mod->init_size);
2116 mod->init_text_size = mod->init_size;
2117 break;
2118 case 1: /* RO: text and ro-data */
2119 mod->init_size = debug_align(mod->init_size);
2120 mod->init_ro_size = mod->init_size;
2121 break;
2122 case 3: /* whole init */
2123 mod->init_size = debug_align(mod->init_size);
2124 break;
2125 }
2126 }
2127 }
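/*
 * The resulting core layout, given the mask ordering above, is
 * (the init region is laid out the same way from the .init sections):
 *
 *   module_core: [ text | rodata | rw data | small data ]
 *                       ^core_text_size
 *                                ^core_ro_size
 *                                                        ^core_size
 *
 * core_text_size and core_ro_size are the boundaries that
 * set_section_ro_nx() later uses when applying RO/NX protections.
 */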
2128
2129 static void set_license(struct module *mod, const char *license)
2130 {
2131 if (!license)
2132 license = "unspecified";
2133
2134 if (!license_is_gpl_compatible(license)) {
2135 if (!test_taint(TAINT_PROPRIETARY_MODULE))
2136 pr_warn("%s: module license '%s' taints kernel.\n",
2137 mod->name, license);
2138 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2139 LOCKDEP_NOW_UNRELIABLE);
2140 }
2141 }
2142
2143 /* Parse tag=value strings from .modinfo section */
2144 static char *next_string(char *string, unsigned long *secsize)
2145 {
2146 /* Skip non-zero chars */
2147 while (string[0]) {
2148 string++;
2149 if ((*secsize)-- <= 1)
2150 return NULL;
2151 }
2152
2153 /* Skip any zero padding. */
2154 while (!string[0]) {
2155 string++;
2156 if ((*secsize)-- <= 1)
2157 return NULL;
2158 }
2159 return string;
2160 }
2161
2162 static char *get_modinfo(struct load_info *info, const char *tag)
2163 {
2164 char *p;
2165 unsigned int taglen = strlen(tag);
2166 Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2167 unsigned long size = infosec->sh_size;
2168
2169 for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
2170 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2171 return p + taglen + 1;
2172 }
2173 return NULL;
2174 }
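/*
 * For illustration, .modinfo is a sequence of NUL-terminated
 * "tag=value" strings, e.g. (values hypothetical):
 *
 *   "license=GPL\0author=Jane Doe\0vermagic=3.13.0 SMP mod_unload \0"
 *
 * so get_modinfo(info, "license") would return a pointer to "GPL".
 */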
2175
2176 static void setup_modinfo(struct module *mod, struct load_info *info)
2177 {
2178 struct module_attribute *attr;
2179 int i;
2180
2181 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2182 if (attr->setup)
2183 attr->setup(mod, get_modinfo(info, attr->attr.name));
2184 }
2185 }
2186
2187 static void free_modinfo(struct module *mod)
2188 {
2189 struct module_attribute *attr;
2190 int i;
2191
2192 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2193 if (attr->free)
2194 attr->free(mod);
2195 }
2196 }
2197
2198 #ifdef CONFIG_KALLSYMS
2199
2200 /* lookup symbol in given range of kernel_symbols */
2201 static const struct kernel_symbol *lookup_symbol(const char *name,
2202 const struct kernel_symbol *start,
2203 const struct kernel_symbol *stop)
2204 {
2205 return bsearch(name, start, stop - start,
2206 sizeof(struct kernel_symbol), cmp_name);
2207 }
2208
2209 static int is_exported(const char *name, unsigned long value,
2210 const struct module *mod)
2211 {
2212 const struct kernel_symbol *ks;
2213 if (!mod)
2214 ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2215 else
2216 ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2217 return ks != NULL && ks->value == value;
2218 }
2219
2220 /* As per nm */
2221 static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2222 {
2223 const Elf_Shdr *sechdrs = info->sechdrs;
2224
2225 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2226 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2227 return 'v';
2228 else
2229 return 'w';
2230 }
2231 if (sym->st_shndx == SHN_UNDEF)
2232 return 'U';
2233 if (sym->st_shndx == SHN_ABS)
2234 return 'a';
2235 if (sym->st_shndx >= SHN_LORESERVE)
2236 return '?';
2237 if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2238 return 't';
2239 if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2240 && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2241 if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2242 return 'r';
2243 else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2244 return 'g';
2245 else
2246 return 'd';
2247 }
2248 if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2249 if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2250 return 's';
2251 else
2252 return 'b';
2253 }
2254 if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2255 ".debug")) {
2256 return 'n';
2257 }
2258 return '?';
2259 }
2260
2261 static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2262 unsigned int shnum)
2263 {
2264 const Elf_Shdr *sec;
2265
2266 if (src->st_shndx == SHN_UNDEF
2267 || src->st_shndx >= shnum
2268 || !src->st_name)
2269 return false;
2270
2271 sec = sechdrs + src->st_shndx;
2272 if (!(sec->sh_flags & SHF_ALLOC)
2273 #ifndef CONFIG_KALLSYMS_ALL
2274 || !(sec->sh_flags & SHF_EXECINSTR)
2275 #endif
2276 || (sec->sh_entsize & INIT_OFFSET_MASK))
2277 return false;
2278
2279 return true;
2280 }
2281
2282 /*
2283 * We only allocate and copy the strings needed by the parts of symtab
2284 * we keep. This is simple, but has the effect of making multiple
2285 * copies of duplicates. We could be more sophisticated, see
2286 * linux-kernel thread starting with
2287 * <73defb5e4bca04a6431392cc341112b1@localhost>.
2288 */
2289 static void layout_symtab(struct module *mod, struct load_info *info)
2290 {
2291 Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2292 Elf_Shdr *strsect = info->sechdrs + info->index.str;
2293 const Elf_Sym *src;
2294 unsigned int i, nsrc, ndst, strtab_size = 0;
2295
2296 /* Put symbol section at end of init part of module. */
2297 symsect->sh_flags |= SHF_ALLOC;
2298 symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
2299 info->index.sym) | INIT_OFFSET_MASK;
2300 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
2301
2302 src = (void *)info->hdr + symsect->sh_offset;
2303 nsrc = symsect->sh_size / sizeof(*src);
2304
2305 /* Compute total space required for the core symbols' strtab. */
2306 for (ndst = i = 0; i < nsrc; i++) {
2307 if (i == 0 ||
2308 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2309 strtab_size += strlen(&info->strtab[src[i].st_name])+1;
2310 ndst++;
2311 }
2312 }
2313
2314 /* Append room for core symbols at end of core part. */
2315 info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
2316 info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
2317 mod->core_size += strtab_size;
2318
2319 /* Put string table section at end of init part of module. */
2320 strsect->sh_flags |= SHF_ALLOC;
2321 strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
2322 info->index.str) | INIT_OFFSET_MASK;
2323 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2324 }
2325
2326 static void add_kallsyms(struct module *mod, const struct load_info *info)
2327 {
2328 unsigned int i, ndst;
2329 const Elf_Sym *src;
2330 Elf_Sym *dst;
2331 char *s;
2332 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2333
2334 mod->symtab = (void *)symsec->sh_addr;
2335 mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2336 /* Make sure we get permanent strtab: don't use info->strtab. */
2337 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2338
2339 /* Set types up while we still have access to sections. */
2340 for (i = 0; i < mod->num_symtab; i++)
2341 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
2342
2343 mod->core_symtab = dst = mod->module_core + info->symoffs;
2344 mod->core_strtab = s = mod->module_core + info->stroffs;
2345 src = mod->symtab;
2346 for (ndst = i = 0; i < mod->num_symtab; i++) {
2347 if (i == 0 ||
2348 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2349 dst[ndst] = src[i];
2350 dst[ndst++].st_name = s - mod->core_strtab;
2351 s += strlcpy(s, &mod->strtab[src[i].st_name],
2352 KSYM_NAME_LEN) + 1;
2353 }
2354 }
2355 mod->core_num_syms = ndst;
2356 }
2357 #else
2358 static inline void layout_symtab(struct module *mod, struct load_info *info)
2359 {
2360 }
2361
2362 static void add_kallsyms(struct module *mod, const struct load_info *info)
2363 {
2364 }
2365 #endif /* CONFIG_KALLSYMS */
2366
2367 static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
2368 {
2369 if (!debug)
2370 return;
2371 #ifdef CONFIG_DYNAMIC_DEBUG
2372 if (ddebug_add_module(debug, num, debug->modname))
2373 pr_err("dynamic debug error adding module: %s\n",
2374 debug->modname);
2375 #endif
2376 }
2377
2378 static void dynamic_debug_remove(struct _ddebug *debug)
2379 {
2380 if (debug)
2381 ddebug_remove_module(debug->modname);
2382 }
2383
2384 void * __weak module_alloc(unsigned long size)
2385 {
2386 return vmalloc_exec(size);
2387 }
2388
2389 static void *module_alloc_update_bounds(unsigned long size)
2390 {
2391 void *ret = module_alloc(size);
2392
2393 if (ret) {
2394 mutex_lock(&module_mutex);
2395 /* Update module bounds. */
2396 if ((unsigned long)ret < module_addr_min)
2397 module_addr_min = (unsigned long)ret;
2398 if ((unsigned long)ret + size > module_addr_max)
2399 module_addr_max = (unsigned long)ret + size;
2400 mutex_unlock(&module_mutex);
2401 }
2402 return ret;
2403 }
2404
2405 #ifdef CONFIG_DEBUG_KMEMLEAK
2406 static void kmemleak_load_module(const struct module *mod,
2407 const struct load_info *info)
2408 {
2409 unsigned int i;
2410
2411 /* only scan the sections containing data */
2412 kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2413
2414 for (i = 1; i < info->hdr->e_shnum; i++) {
2415 /* Scan all writable sections that are not executable */
2416 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
2417 !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
2418 (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
2419 continue;
2420
2421 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2422 info->sechdrs[i].sh_size, GFP_KERNEL);
2423 }
2424 }
2425 #else
2426 static inline void kmemleak_load_module(const struct module *mod,
2427 const struct load_info *info)
2428 {
2429 }
2430 #endif
2431
2432 #ifdef CONFIG_MODULE_SIG
2433 static int module_sig_check(struct load_info *info)
2434 {
2435 int err = -ENOKEY;
2436 const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2437 const void *mod = info->hdr;
2438
2439 if (info->len > markerlen &&
2440 memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
2441 /* We truncate the module to discard the signature */
2442 info->len -= markerlen;
2443 err = mod_verify_sig(mod, &info->len);
2444 }
2445
2446 if (!err) {
2447 info->sig_ok = true;
2448 return 0;
2449 }
2450
2451 /* Not having a signature is only an error if we're strict. */
2452 if (err < 0 && fips_enabled)
2453 panic("Module verification failed with error %d in FIPS mode\n",
2454 err);
2455 if (err == -ENOKEY && !sig_enforce)
2456 err = 0;
2457
2458 return err;
2459 }
2460 #else /* !CONFIG_MODULE_SIG */
2461 static int module_sig_check(struct load_info *info)
2462 {
2463 return 0;
2464 }
2465 #endif /* !CONFIG_MODULE_SIG */
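/*
 * A signed module image, as checked above, is the ELF object followed
 * by the signature data and the MODULE_SIG_STRING marker, roughly:
 *
 *   [ ELF module ][ signature ][ "~Module signature appended~\n" ]
 *
 * module_sig_check() strips the marker and hands the rest to
 * mod_verify_sig(), which trims info->len back to the bare module.
 */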
2466
2467 /* Sanity checks against invalid binaries, wrong arch, weird elf version. */
2468 static int elf_header_check(struct load_info *info)
2469 {
2470 if (info->len < sizeof(*(info->hdr)))
2471 return -ENOEXEC;
2472
2473 if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
2474 || info->hdr->e_type != ET_REL
2475 || !elf_check_arch(info->hdr)
2476 || info->hdr->e_shentsize != sizeof(Elf_Shdr))
2477 return -ENOEXEC;
2478
2479 if (info->hdr->e_shoff >= info->len
2480 || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
2481 info->len - info->hdr->e_shoff))
2482 return -ENOEXEC;
2483
2484 return 0;
2485 }
2486
2487 /* Sets info->hdr and info->len. */
2488 static int copy_module_from_user(const void __user *umod, unsigned long len,
2489 struct load_info *info)
2490 {
2491 int err;
2492
2493 info->len = len;
2494 if (info->len < sizeof(*(info->hdr)))
2495 return -ENOEXEC;
2496
2497 err = security_kernel_module_from_file(NULL);
2498 if (err)
2499 return err;
2500
2501 /* Suck in entire file: we'll want most of it. */
2502 info->hdr = vmalloc(info->len);
2503 if (!info->hdr)
2504 return -ENOMEM;
2505
2506 if (copy_from_user(info->hdr, umod, info->len) != 0) {
2507 vfree(info->hdr);
2508 return -EFAULT;
2509 }
2510
2511 return 0;
2512 }
2513
2514 /* Sets info->hdr and info->len. */
2515 static int copy_module_from_fd(int fd, struct load_info *info)
2516 {
2517 struct fd f = fdget(fd);
2518 int err;
2519 struct kstat stat;
2520 loff_t pos;
2521 ssize_t bytes = 0;
2522
2523 if (!f.file)
2524 return -ENOEXEC;
2525
2526 err = security_kernel_module_from_file(f.file);
2527 if (err)
2528 goto out;
2529
2530 err = vfs_getattr(&f.file->f_path, &stat);
2531 if (err)
2532 goto out;
2533
2534 if (stat.size > INT_MAX) {
2535 err = -EFBIG;
2536 goto out;
2537 }
2538
2539 /* Don't hand 0 to vmalloc, it whines. */
2540 if (stat.size == 0) {
2541 err = -EINVAL;
2542 goto out;
2543 }
2544
2545 info->hdr = vmalloc(stat.size);
2546 if (!info->hdr) {
2547 err = -ENOMEM;
2548 goto out;
2549 }
2550
2551 pos = 0;
2552 while (pos < stat.size) {
2553 bytes = kernel_read(f.file, pos, (char *)(info->hdr) + pos,
2554 stat.size - pos);
2555 if (bytes < 0) {
2556 vfree(info->hdr);
2557 err = bytes;
2558 goto out;
2559 }
2560 if (bytes == 0)
2561 break;
2562 pos += bytes;
2563 }
2564 info->len = pos;
2565
2566 out:
2567 fdput(f);
2568 return err;
2569 }
2570
2571 static void free_copy(struct load_info *info)
2572 {
2573 vfree(info->hdr);
2574 }
2575
2576 static int rewrite_section_headers(struct load_info *info, int flags)
2577 {
2578 unsigned int i;
2579
2580 /* This should always be true, but let's be sure. */
2581 info->sechdrs[0].sh_addr = 0;
2582
2583 for (i = 1; i < info->hdr->e_shnum; i++) {
2584 Elf_Shdr *shdr = &info->sechdrs[i];
2585 if (shdr->sh_type != SHT_NOBITS
2586 && info->len < shdr->sh_offset + shdr->sh_size) {
2587 pr_err("Module len %lu truncated\n", info->len);
2588 return -ENOEXEC;
2589 }
2590
2591 /* Mark all sections' sh_addr with their address in the
2592 temporary image. */
2593 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
2594
2595 #ifndef CONFIG_MODULE_UNLOAD
2596 /* Don't load .exit sections */
2597 if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
2598 shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
2599 #endif
2600 }
2601
2602 /* Track but don't keep modinfo and version sections. */
2603 if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
2604 info->index.vers = 0; /* Pretend no __versions section! */
2605 else
2606 info->index.vers = find_sec(info, "__versions");
2607 info->index.info = find_sec(info, ".modinfo");
2608 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2609 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
2610 return 0;
2611 }
2612
2613 /*
2614 * Set up our basic convenience variables (pointers to section headers,
2615 * search for module section index etc), and do some basic section
2616 * verification.
2617 *
2618 * Return the temporary module pointer (we'll replace it with the final
2619 * one when we move the module sections around).
2620 */
2621 static struct module *setup_load_info(struct load_info *info, int flags)
2622 {
2623 unsigned int i;
2624 int err;
2625 struct module *mod;
2626
2627 /* Set up the convenience variables */
2628 info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
2629 info->secstrings = (void *)info->hdr
2630 + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
2631
2632 err = rewrite_section_headers(info, flags);
2633 if (err)
2634 return ERR_PTR(err);
2635
2636 /* Find internal symbols and strings. */
2637 for (i = 1; i < info->hdr->e_shnum; i++) {
2638 if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
2639 info->index.sym = i;
2640 info->index.str = info->sechdrs[i].sh_link;
2641 info->strtab = (char *)info->hdr
2642 + info->sechdrs[info->index.str].sh_offset;
2643 break;
2644 }
2645 }
2646
2647 info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
2648 if (!info->index.mod) {
2649 pr_warn("No module found in object\n");
2650 return ERR_PTR(-ENOEXEC);
2651 }
2652 /* This is temporary: point mod into copy of data. */
2653 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2654
2655 if (info->index.sym == 0) {
2656 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
2657 return ERR_PTR(-ENOEXEC);
2658 }
2659
2660 info->index.pcpu = find_pcpusec(info);
2661
2662 /* Check module struct version now, before we try to use module. */
2663 if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
2664 return ERR_PTR(-ENOEXEC);
2665
2666 return mod;
2667 }
2668
2669 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
2670 {
2671 const char *modmagic = get_modinfo(info, "vermagic");
2672 int err;
2673
2674 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
2675 modmagic = NULL;
2676
2677 /* This is allowed: modprobe --force will invalidate it. */
2678 if (!modmagic) {
2679 err = try_to_force_load(mod, "bad vermagic");
2680 if (err)
2681 return err;
2682 } else if (!same_magic(modmagic, vermagic, info->index.vers)) {
2683 pr_err("%s: version magic '%s' should be '%s'\n",
2684 mod->name, modmagic, vermagic);
2685 return -ENOEXEC;
2686 }
2687
2688 if (!get_modinfo(info, "intree"))
2689 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
2690
2691 if (get_modinfo(info, "staging")) {
2692 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
2693 pr_warn("%s: module is from the staging directory, the quality "
2694 "is unknown, you have been warned.\n", mod->name);
2695 }
2696
2697 /* Set up license info based on the info section */
2698 set_license(mod, get_modinfo(info, "license"));
2699
2700 return 0;
2701 }
2702
2703 static int find_module_sections(struct module *mod, struct load_info *info)
2704 {
2705 mod->kp = section_objs(info, "__param",
2706 sizeof(*mod->kp), &mod->num_kp);
2707 mod->syms = section_objs(info, "__ksymtab",
2708 sizeof(*mod->syms), &mod->num_syms);
2709 mod->crcs = section_addr(info, "__kcrctab");
2710 mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
2711 sizeof(*mod->gpl_syms),
2712 &mod->num_gpl_syms);
2713 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
2714 mod->gpl_future_syms = section_objs(info,
2715 "__ksymtab_gpl_future",
2716 sizeof(*mod->gpl_future_syms),
2717 &mod->num_gpl_future_syms);
2718 mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
2719
2720 #ifdef CONFIG_UNUSED_SYMBOLS
2721 mod->unused_syms = section_objs(info, "__ksymtab_unused",
2722 sizeof(*mod->unused_syms),
2723 &mod->num_unused_syms);
2724 mod->unused_crcs = section_addr(info, "__kcrctab_unused");
2725 mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
2726 sizeof(*mod->unused_gpl_syms),
2727 &mod->num_unused_gpl_syms);
2728 mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
2729 #endif
2730 #ifdef CONFIG_CONSTRUCTORS
2731 mod->ctors = section_objs(info, ".ctors",
2732 sizeof(*mod->ctors), &mod->num_ctors);
2733 if (!mod->ctors)
2734 mod->ctors = section_objs(info, ".init_array",
2735 sizeof(*mod->ctors), &mod->num_ctors);
2736 else if (find_sec(info, ".init_array")) {
2737 /*
2738 * This shouldn't happen with same compiler and binutils
2739 * building all parts of the module.
2740 */
2741 pr_warn("%s: has both .ctors and .init_array.\n",
2742 mod->name);
2743 return -EINVAL;
2744 }
2745 #endif
2746
2747 #ifdef CONFIG_TRACEPOINTS
2748 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
2749 sizeof(*mod->tracepoints_ptrs),
2750 &mod->num_tracepoints);
2751 #endif
2752 #ifdef HAVE_JUMP_LABEL
2753 mod->jump_entries = section_objs(info, "__jump_table",
2754 sizeof(*mod->jump_entries),
2755 &mod->num_jump_entries);
2756 #endif
2757 #ifdef CONFIG_EVENT_TRACING
2758 mod->trace_events = section_objs(info, "_ftrace_events",
2759 sizeof(*mod->trace_events),
2760 &mod->num_trace_events);
2761 #endif
2762 #ifdef CONFIG_TRACING
2763 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
2764 sizeof(*mod->trace_bprintk_fmt_start),
2765 &mod->num_trace_bprintk_fmt);
2766 #endif
2767 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
2768 /* sechdrs[0].sh_size is always zero */
2769 mod->ftrace_callsites = section_objs(info, "__mcount_loc",
2770 sizeof(*mod->ftrace_callsites),
2771 &mod->num_ftrace_callsites);
2772 #endif
2773
2774 mod->extable = section_objs(info, "__ex_table",
2775 sizeof(*mod->extable), &mod->num_exentries);
2776
2777 if (section_addr(info, "__obsparm"))
2778 pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
2779
2780 info->debug = section_objs(info, "__verbose",
2781 sizeof(*info->debug), &info->num_debug);
2782
2783 return 0;
2784 }
2785
2786 static int move_module(struct module *mod, struct load_info *info)
2787 {
2788 int i;
2789 void *ptr;
2790
2791 /* Do the allocs. */
2792 ptr = module_alloc_update_bounds(mod->core_size);
2793 /*
2794 * The pointer to this block is stored in the module structure
2795 * which is inside the block. Just mark it as not being a
2796 * leak.
2797 */
2798 kmemleak_not_leak(ptr);
2799 if (!ptr)
2800 return -ENOMEM;
2801
2802 memset(ptr, 0, mod->core_size);
2803 mod->module_core = ptr;
2804
2805 if (mod->init_size) {
2806 ptr = module_alloc_update_bounds(mod->init_size);
2807 /*
2808 * The pointer to this block is stored in the module structure
2809 * which is inside the block. This block doesn't need to be
2810 * scanned as it contains data and code that will be freed
2811 * after the module is initialized.
2812 */
2813 kmemleak_ignore(ptr);
2814 if (!ptr) {
2815 module_free(mod, mod->module_core);
2816 return -ENOMEM;
2817 }
2818 memset(ptr, 0, mod->init_size);
2819 mod->module_init = ptr;
2820 } else
2821 mod->module_init = NULL;
2822
2823 /* Transfer each section which specifies SHF_ALLOC */
2824 pr_debug("final section addresses:\n");
2825 for (i = 0; i < info->hdr->e_shnum; i++) {
2826 void *dest;
2827 Elf_Shdr *shdr = &info->sechdrs[i];
2828
2829 if (!(shdr->sh_flags & SHF_ALLOC))
2830 continue;
2831
2832 if (shdr->sh_entsize & INIT_OFFSET_MASK)
2833 dest = mod->module_init
2834 + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
2835 else
2836 dest = mod->module_core + shdr->sh_entsize;
2837
2838 if (shdr->sh_type != SHT_NOBITS)
2839 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
2840 /* Update sh_addr to point to copy in image. */
2841 shdr->sh_addr = (unsigned long)dest;
2842 pr_debug("\t0x%lx %s\n",
2843 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
2844 }
2845
2846 return 0;
2847 }
2848
2849 static int check_module_license_and_versions(struct module *mod)
2850 {
2851 /*
2852 * ndiswrapper is under GPL by itself, but loads proprietary modules.
2853 * Don't use add_taint_module(), as it would prevent ndiswrapper from
2854 * using GPL-only symbols it needs.
2855 */
2856 if (strcmp(mod->name, "ndiswrapper") == 0)
2857 add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
2858
2859 /* driverloader was caught wrongly pretending to be under GPL */
2860 if (strcmp(mod->name, "driverloader") == 0)
2861 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2862 LOCKDEP_NOW_UNRELIABLE);
2863
2864 /* lve claims to be GPL but upstream won't provide source */
2865 if (strcmp(mod->name, "lve") == 0)
2866 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2867 LOCKDEP_NOW_UNRELIABLE);
2868
2869 #ifdef CONFIG_MODVERSIONS
2870 if ((mod->num_syms && !mod->crcs)
2871 || (mod->num_gpl_syms && !mod->gpl_crcs)
2872 || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
2873 #ifdef CONFIG_UNUSED_SYMBOLS
2874 || (mod->num_unused_syms && !mod->unused_crcs)
2875 || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
2876 #endif
2877 ) {
2878 return try_to_force_load(mod,
2879 "no versions for exported symbols");
2880 }
2881 #endif
2882 return 0;
2883 }
2884
2885 static void flush_module_icache(const struct module *mod)
2886 {
2887 mm_segment_t old_fs;
2888
2889 /* flush the icache in correct context */
2890 old_fs = get_fs();
2891 set_fs(KERNEL_DS);
2892
2893 /*
2894 * Flush the instruction cache, since we've played with text.
2895 * Do it before processing of module parameters, so the module
2896 * can provide parameter accessor functions of its own.
2897 */
2898 if (mod->module_init)
2899 flush_icache_range((unsigned long)mod->module_init,
2900 (unsigned long)mod->module_init
2901 + mod->init_size);
2902 flush_icache_range((unsigned long)mod->module_core,
2903 (unsigned long)mod->module_core + mod->core_size);
2904
2905 set_fs(old_fs);
2906 }
2907
2908 int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
2909 Elf_Shdr *sechdrs,
2910 char *secstrings,
2911 struct module *mod)
2912 {
2913 return 0;
2914 }
2915
2916 static struct module *layout_and_allocate(struct load_info *info, int flags)
2917 {
2918 /* Module within temporary copy. */
2919 struct module *mod;
2920 int err;
2921
2922 mod = setup_load_info(info, flags);
2923 if (IS_ERR(mod))
2924 return mod;
2925
2926 err = check_modinfo(mod, info, flags);
2927 if (err)
2928 return ERR_PTR(err);
2929
2930 /* Allow arches to frob section contents and sizes. */
2931 err = module_frob_arch_sections(info->hdr, info->sechdrs,
2932 info->secstrings, mod);
2933 if (err < 0)
2934 return ERR_PTR(err);
2935
2936 /* We will do a special allocation for per-cpu sections later. */
2937 info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
2938
2939 /* Determine total sizes, and put offsets in sh_entsize. For now
2940 this is done generically; there don't appear to be any
2941 special cases for the architectures. */
2942 layout_sections(mod, info);
2943 layout_symtab(mod, info);
2944
2945 /* Allocate and move to the final place */
2946 err = move_module(mod, info);
2947 if (err)
2948 return ERR_PTR(err);
2949
2950 /* Module has been copied to its final place now: return it. */
2951 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2952 kmemleak_load_module(mod, info);
2953 return mod;
2954 }
2955
2956 /* mod is no longer valid after this! */
2957 static void module_deallocate(struct module *mod, struct load_info *info)
2958 {
2959 percpu_modfree(mod);
2960 module_free(mod, mod->module_init);
2961 module_free(mod, mod->module_core);
2962 }
2963
2964 int __weak module_finalize(const Elf_Ehdr *hdr,
2965 const Elf_Shdr *sechdrs,
2966 struct module *me)
2967 {
2968 return 0;
2969 }
2970
2971 static int post_relocation(struct module *mod, const struct load_info *info)
2972 {
2973 /* Sort exception table now relocations are done. */
2974 sort_extable(mod->extable, mod->extable + mod->num_exentries);
2975
2976 /* Copy relocated percpu area over. */
2977 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
2978 info->sechdrs[info->index.pcpu].sh_size);
2979
2980 /* Setup kallsyms-specific fields. */
2981 add_kallsyms(mod, info);
2982
2983 /* Arch-specific module finalizing. */
2984 return module_finalize(info->hdr, info->sechdrs, mod);
2985 }
2986
2987 /* Is the module with this name done loading? No locks held. */
2988 static bool finished_loading(const char *name)
2989 {
2990 struct module *mod;
2991 bool ret;
2992
2993 mutex_lock(&module_mutex);
2994 mod = find_module_all(name, strlen(name), true);
2995 ret = !mod || mod->state == MODULE_STATE_LIVE
2996 || mod->state == MODULE_STATE_GOING;
2997 mutex_unlock(&module_mutex);
2998
2999 return ret;
3000 }
3001
3002 /* Call module constructors. */
3003 static void do_mod_ctors(struct module *mod)
3004 {
3005 #ifdef CONFIG_CONSTRUCTORS
3006 unsigned long i;
3007
3008 for (i = 0; i < mod->num_ctors; i++)
3009 mod->ctors[i]();
3010 #endif
3011 }
3012
3013 /* This is where the real work happens */
3014 static int do_init_module(struct module *mod)
3015 {
3016 int ret = 0;
3017
3018 /*
3019 * We want to find out whether @mod uses async during init. Clear
3020 * PF_USED_ASYNC. async_schedule*() will set it.
3021 */
3022 current->flags &= ~PF_USED_ASYNC;
3023
3024 blocking_notifier_call_chain(&module_notify_list,
3025 MODULE_STATE_COMING, mod);
3026
3027 /* Set RO and NX regions for core */
3028 set_section_ro_nx(mod->module_core,
3029 mod->core_text_size,
3030 mod->core_ro_size,
3031 mod->core_size);
3032
3033 /* Set RO and NX regions for init */
3034 set_section_ro_nx(mod->module_init,
3035 mod->init_text_size,
3036 mod->init_ro_size,
3037 mod->init_size);
3038
3039 do_mod_ctors(mod);
3040 /* Start the module */
3041 if (mod->init != NULL)
3042 ret = do_one_initcall(mod->init);
3043 if (ret < 0) {
3044 /* Init routine failed: abort. Try to protect us from
3045 buggy refcounters. */
3046 mod->state = MODULE_STATE_GOING;
3047 synchronize_sched();
3048 module_put(mod);
3049 blocking_notifier_call_chain(&module_notify_list,
3050 MODULE_STATE_GOING, mod);
3051 free_module(mod);
3052 wake_up_all(&module_wq);
3053 return ret;
3054 }
3055 if (ret > 0) {
3056 pr_warn("%s: '%s'->init suspiciously returned %d, it should "
3057 "follow 0/-E convention\n"
3058 "%s: loading module anyway...\n",
3059 __func__, mod->name, ret, __func__);
3060 dump_stack();
3061 }
3062
3063 /* Now it's a first class citizen! */
3064 mod->state = MODULE_STATE_LIVE;
3065 blocking_notifier_call_chain(&module_notify_list,
3066 MODULE_STATE_LIVE, mod);
3067
3068 /*
3069 * We need to finish all async code before the module init sequence
3070 * is done. This has potential to deadlock. For example, a newly
3071 * detected block device can trigger request_module() of the
3072 * default iosched from async probing task. Once userland helper
3073 * reaches here, async_synchronize_full() will wait on the async
3074 * task waiting on request_module() and deadlock.
3075 *
3076 * This deadlock is avoided by performing async_synchronize_full()
3077 * iff module init queued any async jobs. This isn't a full
3078 * solution as it will deadlock the same way if module loading from
3079 * async jobs nests more than once; however, due to the various
3080 * constraints, this hack seems to be the best option for now.
3081 * Please refer to the following thread for details.
3082 *
3083 * http://thread.gmane.org/gmane.linux.kernel/1420814
3084 */
3085 if (current->flags & PF_USED_ASYNC)
3086 async_synchronize_full();
3087
3088 mutex_lock(&module_mutex);
3089 /* Drop initial reference. */
3090 module_put(mod);
3091 trim_init_extable(mod);
3092 #ifdef CONFIG_KALLSYMS
3093 mod->num_symtab = mod->core_num_syms;
3094 mod->symtab = mod->core_symtab;
3095 mod->strtab = mod->core_strtab;
3096 #endif
3097 unset_module_init_ro_nx(mod);
3098 module_free(mod, mod->module_init);
3099 mod->module_init = NULL;
3100 mod->init_size = 0;
3101 mod->init_ro_size = 0;
3102 mod->init_text_size = 0;
3103 mutex_unlock(&module_mutex);
3104 wake_up_all(&module_wq);
3105
3106 return 0;
3107 }
3108
3109 static int may_init_module(void)
3110 {
3111 if (!capable(CAP_SYS_MODULE) || modules_disabled)
3112 return -EPERM;
3113
3114 return 0;
3115 }
3116
3117 /*
3118 * We try to place it in the list now to make sure it's unique before
3119 * we dedicate too many resources - in particular, to avoid temporary
3120 * percpu memory exhaustion.
3121 */
3122 static int add_unformed_module(struct module *mod)
3123 {
3124 int err;
3125 struct module *old;
3126
3127 mod->state = MODULE_STATE_UNFORMED;
3128
3129 again:
3130 mutex_lock(&module_mutex);
3131 old = find_module_all(mod->name, strlen(mod->name), true);
3132 if (old != NULL) {
3133 if (old->state == MODULE_STATE_COMING
3134 || old->state == MODULE_STATE_UNFORMED) {
3135 /* Wait in case it fails to load. */
3136 mutex_unlock(&module_mutex);
3137 err = wait_event_interruptible(module_wq,
3138 finished_loading(mod->name));
3139 if (err)
3140 goto out_unlocked;
3141 goto again;
3142 }
3143 err = -EEXIST;
3144 goto out;
3145 }
3146 list_add_rcu(&mod->list, &modules);
3147 err = 0;
3148
3149 out:
3150 mutex_unlock(&module_mutex);
3151 out_unlocked:
3152 return err;
3153 }
3154
3155 static int complete_formation(struct module *mod, struct load_info *info)
3156 {
3157 int err;
3158
3159 mutex_lock(&module_mutex);
3160
3161 /* Find duplicate symbols (must be called under lock). */
3162 err = verify_export_symbols(mod);
3163 if (err < 0)
3164 goto out;
3165
3166 /* This relies on module_mutex for list integrity. */
3167 module_bug_finalize(info->hdr, info->sechdrs, mod);
3168
3169 /* Mark state as coming so strong_try_module_get() ignores us,
3170 * but kallsyms etc. can see us. */
3171 mod->state = MODULE_STATE_COMING;
3172
3173 out:
3174 mutex_unlock(&module_mutex);
3175 return err;
3176 }
3177
3178 static int unknown_module_param_cb(char *param, char *val, const char *modname)
3179 {
3180 /* Check for magic 'dyndbg' arg */
3181 int ret = ddebug_dyndbg_module_param_cb(param, val, modname);
3182 if (ret != 0)
3183 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
3184 return 0;
3185 }
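/*
 * For illustration: the 'dyndbg' parameter handled above is not a real
 * module parameter; it is consumed by dynamic debug. A hypothetical
 * invocation such as
 *
 *   modprobe example_mod dyndbg=+p
 *
 * reaches this callback, ddebug_dyndbg_module_param_cb() claims it,
 * and no "unknown parameter" warning is printed.
 */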
3186
3187 /* Allocate and load the module: note that size of section 0 is always
3188 zero, and we rely on this for optional sections. */
3189 static int load_module(struct load_info *info, const char __user *uargs,
3190 int flags)
3191 {
3192 struct module *mod;
3193 long err;
3194
3195 err = module_sig_check(info);
3196 if (err)
3197 goto free_copy;
3198
3199 err = elf_header_check(info);
3200 if (err)
3201 goto free_copy;
3202
3203 /* Figure out module layout, and allocate all the memory. */
3204 mod = layout_and_allocate(info, flags);
3205 if (IS_ERR(mod)) {
3206 err = PTR_ERR(mod);
3207 goto free_copy;
3208 }
3209
3210 /* Reserve our place in the list. */
3211 err = add_unformed_module(mod);
3212 if (err)
3213 goto free_module;
3214
3215 #ifdef CONFIG_MODULE_SIG
3216 mod->sig_ok = info->sig_ok;
3217 if (!mod->sig_ok) {
3218 pr_notice_once("%s: module verification failed: signature "
3219 "and/or required key missing - tainting "
3220 "kernel\n", mod->name);
3221 add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_STILL_OK);
3222 }
3223 #endif
3224
3225 /* To avoid stressing the percpu allocator, do this once we're unique. */
3226 err = percpu_modalloc(mod, info);
3227 if (err)
3228 goto unlink_mod;
3229
3230 /* Now module is in final location, initialize linked lists, etc. */
3231 err = module_unload_init(mod);
3232 if (err)
3233 goto unlink_mod;
3234
3235 /* Now we've got everything in the final locations, we can
3236 * find optional sections. */
3237 err = find_module_sections(mod, info);
3238 if (err)
3239 goto free_unload;
3240
3241 err = check_module_license_and_versions(mod);
3242 if (err)
3243 goto free_unload;
3244
3245 /* Set up MODINFO_ATTR fields */
3246 setup_modinfo(mod, info);
3247
3248 /* Fix up syms, so that st_value is a pointer to location. */
3249 err = simplify_symbols(mod, info);
3250 if (err < 0)
3251 goto free_modinfo;
3252
3253 err = apply_relocations(mod, info);
3254 if (err < 0)
3255 goto free_modinfo;
3256
3257 err = post_relocation(mod, info);
3258 if (err < 0)
3259 goto free_modinfo;
3260
3261 flush_module_icache(mod);
3262
3263 /* Now copy in args */
3264 mod->args = strndup_user(uargs, ~0UL >> 1);
3265 if (IS_ERR(mod->args)) {
3266 err = PTR_ERR(mod->args);
3267 goto free_arch_cleanup;
3268 }
3269
3270 dynamic_debug_setup(info->debug, info->num_debug);
3271
3272 /* Finally it's fully formed, ready to start executing. */
3273 err = complete_formation(mod, info);
3274 if (err)
3275 goto ddebug_cleanup;
3276
3277 /* Module is ready to execute: parsing args may do that. */
3278 err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3279 -32768, 32767, unknown_module_param_cb);
3280 if (err < 0)
3281 goto bug_cleanup;
3282
3283 /* Link in to sysfs. */
3284 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3285 if (err < 0)
3286 goto bug_cleanup;
3287
3288 /* Get rid of temporary copy. */
3289 free_copy(info);
3290
3291 /* Done! */
3292 trace_module_load(mod);
3293
3294 return do_init_module(mod);
3295
3296 bug_cleanup:
3297 /* module_bug_cleanup needs module_mutex protection */
3298 mutex_lock(&module_mutex);
3299 module_bug_cleanup(mod);
3300 mutex_unlock(&module_mutex);
3301 ddebug_cleanup:
3302 dynamic_debug_remove(info->debug);
3303 synchronize_sched();
3304 kfree(mod->args);
3305 free_arch_cleanup:
3306 module_arch_cleanup(mod);
3307 free_modinfo:
3308 free_modinfo(mod);
3309 free_unload:
3310 module_unload_free(mod);
3311 unlink_mod:
3312 mutex_lock(&module_mutex);
3313 /* Unlink carefully: kallsyms could be walking list. */
3314 list_del_rcu(&mod->list);
3315 wake_up_all(&module_wq);
3316 mutex_unlock(&module_mutex);
3317 free_module:
3318 module_deallocate(mod, info);
3319 free_copy:
3320 free_copy(info);
3321 return err;
3322 }
3323
3324 SYSCALL_DEFINE3(init_module, void __user *, umod,
3325 unsigned long, len, const char __user *, uargs)
3326 {
3327 int err;
3328 struct load_info info = { };
3329
3330 err = may_init_module();
3331 if (err)
3332 return err;
3333
3334 pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
3335 umod, len, uargs);
3336
3337 err = copy_module_from_user(umod, len, &info);
3338 if (err)
3339 return err;
3340
3341 return load_module(&info, uargs, 0);
3342 }
3343
3344 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
3345 {
3346 int err;
3347 struct load_info info = { };
3348
3349 err = may_init_module();
3350 if (err)
3351 return err;
3352
3353 pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
3354
3355 if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
3356 |MODULE_INIT_IGNORE_VERMAGIC))
3357 return -EINVAL;
3358
3359 err = copy_module_from_fd(fd, &info);
3360 if (err)
3361 return err;
3362
3363 return load_module(&info, uargs, flags);
3364 }
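/*
 * For illustration only: a minimal userspace sketch of driving the
 * finit_module() syscall above. The module path is hypothetical and
 * error handling is reduced to the bare minimum.
 */
#if 0 /* userspace example, not kernel code */
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; finit_module() reads the image from the fd. */
	int fd = open("/lib/modules/example_mod.ko", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* uargs = "" (no parameters), flags = 0 (strict vermagic checks) */
	if (syscall(SYS_finit_module, fd, "", 0) != 0)
		perror("finit_module");
	close(fd);
	return 0;
}
#endif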
3365
3366 static inline int within(unsigned long addr, void *start, unsigned long size)
3367 {
3368 return ((void *)addr >= start && (void *)addr < start + size);
3369 }
3370
3371 #ifdef CONFIG_KALLSYMS
3372 /*
3373 * This ignores the intensely annoying "mapping symbols" found
3374 * in ARM ELF files: $a, $t and $d.
3375 */
3376 static inline int is_arm_mapping_symbol(const char *str)
3377 {
3378 return str[0] == '$' && strchr("atd", str[1])
3379 && (str[2] == '\0' || str[2] == '.');
3380 }
3381
3382 static const char *get_ksymbol(struct module *mod,
3383 unsigned long addr,
3384 unsigned long *size,
3385 unsigned long *offset)
3386 {
3387 unsigned int i, best = 0;
3388 unsigned long nextval;
3389
3390 /* At worst, the next value is at the end of the module */
3391 if (within_module_init(addr, mod))
3392 nextval = (unsigned long)mod->module_init+mod->init_text_size;
3393 else
3394 nextval = (unsigned long)mod->module_core+mod->core_text_size;
3395
3396 /* Scan for closest preceding symbol, and next symbol. (ELF
3397 starts real symbols at 1). */
3398 for (i = 1; i < mod->num_symtab; i++) {
3399 if (mod->symtab[i].st_shndx == SHN_UNDEF)
3400 continue;
3401
3402 /* We ignore unnamed symbols: they're uninformative
3403 * and inserted at a whim. */
3404 if (mod->symtab[i].st_value <= addr
3405 && mod->symtab[i].st_value > mod->symtab[best].st_value
3406 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3407 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3408 best = i;
3409 if (mod->symtab[i].st_value > addr
3410 && mod->symtab[i].st_value < nextval
3411 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3412 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3413 nextval = mod->symtab[i].st_value;
3414 }
3415
3416 if (!best)
3417 return NULL;
3418
3419 if (size)
3420 *size = nextval - mod->symtab[best].st_value;
3421 if (offset)
3422 *offset = addr - mod->symtab[best].st_value;
3423 return mod->strtab + mod->symtab[best].st_name;
3424 }
3425
3426 /* For kallsyms to ask for address resolution. NULL means not found. We're
3427 * careful not to lock, to avoid deadlock on oopses; simply disable preemption. */
3428 const char *module_address_lookup(unsigned long addr,
3429 unsigned long *size,
3430 unsigned long *offset,
3431 char **modname,
3432 char *namebuf)
3433 {
3434 struct module *mod;
3435 const char *ret = NULL;
3436
3437 preempt_disable();
3438 list_for_each_entry_rcu(mod, &modules, list) {
3439 if (mod->state == MODULE_STATE_UNFORMED)
3440 continue;
3441 if (within_module_init(addr, mod) ||
3442 within_module_core(addr, mod)) {
3443 if (modname)
3444 *modname = mod->name;
3445 ret = get_ksymbol(mod, addr, size, offset);
3446 break;
3447 }
3448 }
3449 /* Make a copy in here where it's safe */
3450 if (ret) {
3451 strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
3452 ret = namebuf;
3453 }
3454 preempt_enable();
3455 return ret;
3456 }
3457
3458 int lookup_module_symbol_name(unsigned long addr, char *symname)
3459 {
3460 struct module *mod;
3461
3462 preempt_disable();
3463 list_for_each_entry_rcu(mod, &modules, list) {
3464 if (mod->state == MODULE_STATE_UNFORMED)
3465 continue;
3466 if (within_module_init(addr, mod) ||
3467 within_module_core(addr, mod)) {
3468 const char *sym;
3469
3470 sym = get_ksymbol(mod, addr, NULL, NULL);
3471 if (!sym)
3472 goto out;
3473 strlcpy(symname, sym, KSYM_NAME_LEN);
3474 preempt_enable();
3475 return 0;
3476 }
3477 }
3478 out:
3479 preempt_enable();
3480 return -ERANGE;
3481 }
3482
3483 int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
3484 unsigned long *offset, char *modname, char *name)
3485 {
3486 struct module *mod;
3487
3488 preempt_disable();
3489 list_for_each_entry_rcu(mod, &modules, list) {
3490 if (mod->state == MODULE_STATE_UNFORMED)
3491 continue;
3492 if (within_module_init(addr, mod) ||
3493 within_module_core(addr, mod)) {
3494 const char *sym;
3495
3496 sym = get_ksymbol(mod, addr, size, offset);
3497 if (!sym)
3498 goto out;
3499 if (modname)
3500 strlcpy(modname, mod->name, MODULE_NAME_LEN);
3501 if (name)
3502 strlcpy(name, sym, KSYM_NAME_LEN);
3503 preempt_enable();
3504 return 0;
3505 }
3506 }
3507 out:
3508 preempt_enable();
3509 return -ERANGE;
3510 }
3511
3512 int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3513 char *name, char *module_name, int *exported)
3514 {
3515 struct module *mod;
3516
3517 preempt_disable();
3518 list_for_each_entry_rcu(mod, &modules, list) {
3519 if (mod->state == MODULE_STATE_UNFORMED)
3520 continue;
3521 if (symnum < mod->num_symtab) {
3522 *value = mod->symtab[symnum].st_value;
3523 *type = mod->symtab[symnum].st_info;
3524 strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
3525 KSYM_NAME_LEN);
3526 strlcpy(module_name, mod->name, MODULE_NAME_LEN);
3527 *exported = is_exported(name, *value, mod);
3528 preempt_enable();
3529 return 0;
3530 }
3531 symnum -= mod->num_symtab;
3532 }
3533 preempt_enable();
3534 return -ERANGE;
3535 }
3536
3537 static unsigned long mod_find_symname(struct module *mod, const char *name)
3538 {
3539 unsigned int i;
3540
3541 for (i = 0; i < mod->num_symtab; i++)
3542 if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
3543 mod->symtab[i].st_info != 'U')
3544 return mod->symtab[i].st_value;
3545 return 0;
3546 }
3547
3548 /* Look for this name: can be of form module:name. */
3549 unsigned long module_kallsyms_lookup_name(const char *name)
3550 {
3551 struct module *mod;
3552 char *colon;
3553 unsigned long ret = 0;
3554
3555 /* Don't lock: we're in enough trouble already. */
3556 preempt_disable();
3557 if ((colon = strchr(name, ':')) != NULL) {
3558 if ((mod = find_module_all(name, colon - name, false)) != NULL)
3559 ret = mod_find_symname(mod, colon+1);
3560 } else {
3561 list_for_each_entry_rcu(mod, &modules, list) {
3562 if (mod->state == MODULE_STATE_UNFORMED)
3563 continue;
3564 if ((ret = mod_find_symname(mod, name)) != 0)
3565 break;
3566 }
3567 }
3568 preempt_enable();
3569 return ret;
3570 }
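/*
 * For illustration (module and symbol names hypothetical):
 *
 *   module_kallsyms_lookup_name("example_mod:example_func")
 *
 * searches only example_mod's symtab, while a bare "example_func"
 * walks every live module until one resolves the name.
 */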
3571
3572 int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3573 struct module *, unsigned long),
3574 void *data)
3575 {
3576 struct module *mod;
3577 unsigned int i;
3578 int ret;
3579
3580 list_for_each_entry(mod, &modules, list) {
3581 if (mod->state == MODULE_STATE_UNFORMED)
3582 continue;
3583 for (i = 0; i < mod->num_symtab; i++) {
3584 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3585 mod, mod->symtab[i].st_value);
3586 if (ret != 0)
3587 return ret;
3588 }
3589 }
3590 return 0;
3591 }
3592 #endif /* CONFIG_KALLSYMS */
3593
3594 static char *module_flags(struct module *mod, char *buf)
3595 {
3596 int bx = 0;
3597
3598 BUG_ON(mod->state == MODULE_STATE_UNFORMED);
3599 if (mod->taints ||
3600 mod->state == MODULE_STATE_GOING ||
3601 mod->state == MODULE_STATE_COMING) {
3602 buf[bx++] = '(';
3603 bx += module_flags_taint(mod, buf + bx);
3604 /* Show a - for module-is-being-unloaded */
3605 if (mod->state == MODULE_STATE_GOING)
3606 buf[bx++] = '-';
3607 /* Show a + for module-is-being-loaded */
3608 if (mod->state == MODULE_STATE_COMING)
3609 buf[bx++] = '+';
3610 buf[bx++] = ')';
3611 }
3612 buf[bx] = '\0';
3613
3614 return buf;
3615 }
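/*
 * Sample output (hypothetical): a proprietary, out-of-tree module that
 * is still loading would be rendered as "(PO+)"; a clean, live module
 * produces an empty string.
 */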
3616
3617 #ifdef CONFIG_PROC_FS
3618 /* Called by the /proc file system to return a list of modules. */
3619 static void *m_start(struct seq_file *m, loff_t *pos)
3620 {
3621 mutex_lock(&module_mutex);
3622 return seq_list_start(&modules, *pos);
3623 }
3624
3625 static void *m_next(struct seq_file *m, void *p, loff_t *pos)
3626 {
3627 return seq_list_next(p, &modules, pos);
3628 }
3629
3630 static void m_stop(struct seq_file *m, void *p)
3631 {
3632 mutex_unlock(&module_mutex);
3633 }
3634
3635 static int m_show(struct seq_file *m, void *p)
3636 {
3637 struct module *mod = list_entry(p, struct module, list);
3638 char buf[8];
3639
3640 /* We always ignore unformed modules. */
3641 if (mod->state == MODULE_STATE_UNFORMED)
3642 return 0;
3643
3644 seq_printf(m, "%s %u",
3645 mod->name, mod->init_size + mod->core_size);
3646 print_unload_info(m, mod);
3647
3648 /* Informative for users. */
3649 seq_printf(m, " %s",
3650 mod->state == MODULE_STATE_GOING ? "Unloading":
3651 mod->state == MODULE_STATE_COMING ? "Loading":
3652 "Live");
3653 /* Used by oprofile and other similar tools. */
3654 seq_printf(m, " 0x%pK", mod->module_core);
3655
3656 /* Taints info */
3657 if (mod->taints)
3658 seq_printf(m, " %s", module_flags(mod, buf));
3659
3660 seq_printf(m, "\n");
3661 return 0;
3662 }
3663
3664 /* Format: modulename size refcount deps address
3665
3666 Where refcount is a number or -, and deps is a comma-separated list
3667 of dependencies or -.
3668 */
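/* A sample line (all values hypothetical):

   example_mod 16384 2 dep_a,dep_b, Live 0xffffffffa0000000
*/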
3669 static const struct seq_operations modules_op = {
3670 .start = m_start,
3671 .next = m_next,
3672 .stop = m_stop,
3673 .show = m_show
3674 };
3675
3676 static int modules_open(struct inode *inode, struct file *file)
3677 {
3678 return seq_open(file, &modules_op);
3679 }
3680
3681 static const struct file_operations proc_modules_operations = {
3682 .open = modules_open,
3683 .read = seq_read,
3684 .llseek = seq_lseek,
3685 .release = seq_release,
3686 };
3687
3688 static int __init proc_modules_init(void)
3689 {
3690 proc_create("modules", 0, NULL, &proc_modules_operations);
3691 return 0;
3692 }
3693 module_init(proc_modules_init);
3694 #endif
3695
3696 /* Given an address, look for it in the module exception tables. */
3697 const struct exception_table_entry *search_module_extables(unsigned long addr)
3698 {
3699 const struct exception_table_entry *e = NULL;
3700 struct module *mod;
3701
3702 preempt_disable();
3703 list_for_each_entry_rcu(mod, &modules, list) {
3704 if (mod->state == MODULE_STATE_UNFORMED)
3705 continue;
3706 if (mod->num_exentries == 0)
3707 continue;
3708
3709 e = search_extable(mod->extable,
3710 mod->extable + mod->num_exentries - 1,
3711 addr);
3712 if (e)
3713 break;
3714 }
3715 preempt_enable();
3716
3717 /* If we found one, we are running inside it now; hence we cannot
3718 unload the module, and no refcount is needed. */
3719 return e;
3720 }
3721
3722 /*
3723 * is_module_address - is this address inside a module?
3724 * @addr: the address to check.
3725 *
3726 * See is_module_text_address() if you simply want to see if the address
3727 * is code (not data).
3728 */
3729 bool is_module_address(unsigned long addr)
3730 {
3731 bool ret;
3732
3733 preempt_disable();
3734 ret = __module_address(addr) != NULL;
3735 preempt_enable();
3736
3737 return ret;
3738 }
3739
3740 /*
3741 * __module_address - get the module which contains an address.
3742 * @addr: the address.
3743 *
3744 * Must be called with preempt disabled or module mutex held so that
3745 * module doesn't get freed during this.
3746 */
3747 struct module *__module_address(unsigned long addr)
3748 {
3749 struct module *mod;
3750
3751 if (addr < module_addr_min || addr > module_addr_max)
3752 return NULL;
3753
3754 list_for_each_entry_rcu(mod, &modules, list) {
3755 if (mod->state == MODULE_STATE_UNFORMED)
3756 continue;
3757 if (within_module_core(addr, mod)
3758 || within_module_init(addr, mod))
3759 return mod;
3760 }
3761 return NULL;
3762 }
3763 EXPORT_SYMBOL_GPL(__module_address);
3764
3765 /*
3766 * is_module_text_address - is this address inside module code?
3767 * @addr: the address to check.
3768 *
3769 * See is_module_address() if you simply want to see if the address is
3770 * anywhere in a module. See kernel_text_address() for testing if an
3771 * address corresponds to kernel or module code.
3772 */
3773 bool is_module_text_address(unsigned long addr)
3774 {
3775 bool ret;
3776
3777 preempt_disable();
3778 ret = __module_text_address(addr) != NULL;
3779 preempt_enable();
3780
3781 return ret;
3782 }
3783
3784 /*
3785 * __module_text_address - get the module whose code contains an address.
3786 * @addr: the address.
3787 *
3788 * Must be called with preempt disabled or module mutex held so that
3789 * module doesn't get freed during this.
3790 */
3791 struct module *__module_text_address(unsigned long addr)
3792 {
3793 struct module *mod = __module_address(addr);
3794 if (mod) {
3795 /* Make sure it's within the text section. */
3796 if (!within(addr, mod->module_init, mod->init_text_size)
3797 && !within(addr, mod->module_core, mod->core_text_size))
3798 mod = NULL;
3799 }
3800 return mod;
3801 }
3802 EXPORT_SYMBOL_GPL(__module_text_address);
3803
3804 /* Don't grab lock, we're oopsing. */
3805 void print_modules(void)
3806 {
3807 struct module *mod;
3808 char buf[8];
3809
3810 printk(KERN_DEFAULT "Modules linked in:");
3811 /* Most callers should already have preempt disabled, but make sure */
3812 preempt_disable();
3813 list_for_each_entry_rcu(mod, &modules, list) {
3814 if (mod->state == MODULE_STATE_UNFORMED)
3815 continue;
3816 printk(" %s%s", mod->name, module_flags(mod, buf));
3817 }
3818 preempt_enable();
3819 if (last_unloaded_module[0])
3820 printk(" [last unloaded: %s]", last_unloaded_module);
3821 printk("\n");
3822 }
3823
3824 #ifdef CONFIG_MODVERSIONS
3825 /* Generate the signature for all relevant module structures here.
3826 * If these change, we don't want to try to parse the module. */
3827 void module_layout(struct module *mod,
3828 struct modversion_info *ver,
3829 struct kernel_param *kp,
3830 struct kernel_symbol *ks,
3831 struct tracepoint * const *tp)
3832 {
3833 }
3834 EXPORT_SYMBOL(module_layout);
3835 #endif