ia64/pv_ops/bp/module: support binary patching for kernel module.
arch/ia64/kernel/module.c
1 /*
2 * IA-64-specific support for kernel module loader.
3 *
4 * Copyright (C) 2003 Hewlett-Packard Co
5 * David Mosberger-Tang <davidm@hpl.hp.com>
6 *
7 * Loosely based on patch by Rusty Russell.
8 */
9
10 /* relocs tested so far:
11
12 DIR64LSB
13 FPTR64LSB
14 GPREL22
15 LDXMOV
16 LDXMOV
17 LTOFF22
18 LTOFF22X
19 LTOFF22X
20 LTOFF_FPTR22
21 PCREL21B (for br.call only; br.cond is not supported out of modules!)
22 PCREL60B (for brl.cond only; brl.call is not supported for modules!)
23 PCREL64LSB
24 SECREL32LSB
25 SEGREL64LSB
26 */
27
28
29 #include <linux/kernel.h>
30 #include <linux/sched.h>
31 #include <linux/elf.h>
32 #include <linux/moduleloader.h>
33 #include <linux/string.h>
34 #include <linux/vmalloc.h>
35
36 #include <asm/patch.h>
37 #include <asm/unaligned.h>
38
39 #define ARCH_MODULE_DEBUG 0
40
41 #if ARCH_MODULE_DEBUG
42 # define DEBUGP printk
43 # define inline
44 #else
45 # define DEBUGP(fmt , a...)
46 #endif
47
48 #ifdef CONFIG_ITANIUM
49 # define USE_BRL 0
50 #else
51 # define USE_BRL 1
52 #endif
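/*
 * The two-bundle PLT below relies on the long-branch (brl) instruction,
 * which is not implemented on original Itanium processors; CONFIG_ITANIUM
 * builds therefore fall back to the three-bundle movl/mov-b6/br PLT
 * defined in the !USE_BRL branch further down.
 */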
53
54 #define MAX_LTOFF ((uint64_t) (1 << 22)) /* max. allowable linkage-table offset */
55
56 /* Define some relocation helper macros/types: */
57
58 #define FORMAT_SHIFT 0
59 #define FORMAT_BITS 3
60 #define FORMAT_MASK ((1 << FORMAT_BITS) - 1)
61 #define VALUE_SHIFT 3
62 #define VALUE_BITS 5
63 #define VALUE_MASK ((1 << VALUE_BITS) - 1)
64
65 enum reloc_target_format {
66 /* direct encoded formats: */
67 RF_NONE = 0,
68 RF_INSN14 = 1,
69 RF_INSN22 = 2,
70 RF_INSN64 = 3,
71 RF_32MSB = 4,
72 RF_32LSB = 5,
73 RF_64MSB = 6,
74 RF_64LSB = 7,
75
76 /* formats that cannot be directly decoded: */
77 RF_INSN60,
78 RF_INSN21B, /* imm21 form 1 */
79 RF_INSN21M, /* imm21 form 2 */
80 RF_INSN21F /* imm21 form 3 */
81 };
82
83 enum reloc_value_formula {
84 RV_DIRECT = 4, /* S + A */
85 RV_GPREL = 5, /* @gprel(S + A) */
86 RV_LTREL = 6, /* @ltoff(S + A) */
87 RV_PLTREL = 7, /* @pltoff(S + A) */
88 RV_FPTR = 8, /* @fptr(S + A) */
89 RV_PCREL = 9, /* S + A - P */
90 RV_LTREL_FPTR = 10, /* @ltoff(@fptr(S + A)) */
91 RV_SEGREL = 11, /* @segrel(S + A) */
92 RV_SECREL = 12, /* @secrel(S + A) */
93 RV_BDREL = 13, /* BD + A */
94 RV_LTV = 14, /* S + A (like RV_DIRECT, except frozen at static link-time) */
95 RV_PCREL2 = 15, /* S + A - P */
96 RV_SPECIAL = 16, /* various (see below) */
97 RV_RSVD17 = 17,
98 RV_TPREL = 18, /* @tprel(S + A) */
99 RV_LTREL_TPREL = 19, /* @ltoff(@tprel(S + A)) */
100 RV_DTPMOD = 20, /* @dtpmod(S + A) */
101 RV_LTREL_DTPMOD = 21, /* @ltoff(@dtpmod(S + A)) */
102 RV_DTPREL = 22, /* @dtprel(S + A) */
103 RV_LTREL_DTPREL = 23, /* @ltoff(@dtprel(S + A)) */
104 RV_RSVD24 = 24,
105 RV_RSVD25 = 25,
106 RV_RSVD26 = 26,
107 RV_RSVD27 = 27
108 /* 28-31 reserved for implementation-specific purposes. */
109 };
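/*
 * Worked example (using the standard ia64 ELF relocation numbering, where
 * R_IA64_GPREL22 is 0x2a): 0x2a & FORMAT_MASK = 2 = RF_INSN22 and
 * (0x2a >> VALUE_SHIFT) & VALUE_MASK = 5 = RV_GPREL, i.e. "patch an imm22
 * instruction with @gprel(S + A)".
 */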
110
111 #define N(reloc) [R_IA64_##reloc] = #reloc
112
113 static const char *reloc_name[256] = {
114 N(NONE), N(IMM14), N(IMM22), N(IMM64),
115 N(DIR32MSB), N(DIR32LSB), N(DIR64MSB), N(DIR64LSB),
116 N(GPREL22), N(GPREL64I), N(GPREL32MSB), N(GPREL32LSB),
117 N(GPREL64MSB), N(GPREL64LSB), N(LTOFF22), N(LTOFF64I),
118 N(PLTOFF22), N(PLTOFF64I), N(PLTOFF64MSB), N(PLTOFF64LSB),
119 N(FPTR64I), N(FPTR32MSB), N(FPTR32LSB), N(FPTR64MSB),
120 N(FPTR64LSB), N(PCREL60B), N(PCREL21B), N(PCREL21M),
121 N(PCREL21F), N(PCREL32MSB), N(PCREL32LSB), N(PCREL64MSB),
122 N(PCREL64LSB), N(LTOFF_FPTR22), N(LTOFF_FPTR64I), N(LTOFF_FPTR32MSB),
123 N(LTOFF_FPTR32LSB), N(LTOFF_FPTR64MSB), N(LTOFF_FPTR64LSB), N(SEGREL32MSB),
124 N(SEGREL32LSB), N(SEGREL64MSB), N(SEGREL64LSB), N(SECREL32MSB),
125 N(SECREL32LSB), N(SECREL64MSB), N(SECREL64LSB), N(REL32MSB),
126 N(REL32LSB), N(REL64MSB), N(REL64LSB), N(LTV32MSB),
127 N(LTV32LSB), N(LTV64MSB), N(LTV64LSB), N(PCREL21BI),
128 N(PCREL22), N(PCREL64I), N(IPLTMSB), N(IPLTLSB),
129 N(COPY), N(LTOFF22X), N(LDXMOV), N(TPREL14),
130 N(TPREL22), N(TPREL64I), N(TPREL64MSB), N(TPREL64LSB),
131 N(LTOFF_TPREL22), N(DTPMOD64MSB), N(DTPMOD64LSB), N(LTOFF_DTPMOD22),
132 N(DTPREL14), N(DTPREL22), N(DTPREL64I), N(DTPREL32MSB),
133 N(DTPREL32LSB), N(DTPREL64MSB), N(DTPREL64LSB), N(LTOFF_DTPREL22)
134 };
135
136 #undef N
137
138 /* Opaque struct for insns, to protect against derefs. */
139 struct insn;
140
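/*
 * A "struct insn *" is really a tagged pointer: ia64 bundles are 16 bytes
 * and hold three instruction slots, so the bundle address lives in the upper
 * bits and the slot number (0-2) in the low bits.  For example, location
 * 0xa000000000001232 means slot 2 of the bundle at 0xa000000000001230.
 */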
141 static inline uint64_t
142 bundle (const struct insn *insn)
143 {
144 return (uint64_t) insn & ~0xfUL;
145 }
146
147 static inline int
148 slot (const struct insn *insn)
149 {
150 return (uint64_t) insn & 0x3;
151 }
152
153 static int
154 apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
155 {
156 if (slot(insn) != 2) {
157 printk(KERN_ERR "%s: invalid slot number %d for IMM64\n",
158 mod->name, slot(insn));
159 return 0;
160 }
161 ia64_patch_imm64((u64) insn, val);
162 return 1;
163 }
164
165 static int
166 apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
167 {
168 if (slot(insn) != 2) {
169 printk(KERN_ERR "%s: invalid slot number %d for IMM60\n",
170 mod->name, slot(insn));
171 return 0;
172 }
173 if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
174 printk(KERN_ERR "%s: value %ld out of IMM60 range\n", mod->name, (int64_t) val);
175 return 0;
176 }
177 ia64_patch_imm60((u64) insn, val);
178 return 1;
179 }
180
181 static int
182 apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
183 {
184 if (val + (1 << 21) >= (1 << 22)) {
185 printk(KERN_ERR "%s: value %li out of IMM22 range\n", mod->name, (int64_t)val);
186 return 0;
187 }
188 ia64_patch((u64) insn, 0x01fffcfe000UL, ( ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
189 | ((val & 0x1f0000UL) << 6) /* bit 16 -> 22 */
190 | ((val & 0x00ff80UL) << 20) /* bit 7 -> 27 */
191 | ((val & 0x00007fUL) << 13) /* bit 0 -> 13 */));
192 return 1;
193 }
194
195 static int
196 apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
197 {
198 if (val + (1 << 20) >= (1 << 21)) {
199 printk(KERN_ERR "%s: value %li out of IMM21b range\n", mod->name, (int64_t)val);
200 return 0;
201 }
202 ia64_patch((u64) insn, 0x11ffffe000UL, ( ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
203 | ((val & 0x0fffffUL) << 13) /* bit 0 -> 13 */));
204 return 1;
205 }
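/*
 * The range checks above use the usual unsigned-wraparound idiom: for an
 * n-bit signed immediate, "val + (1 << (n-1)) >= (1 << n)" rejects anything
 * outside [-2^(n-1), 2^(n-1)).  For IMM21b, val = 0x100000 yields
 * 0x200000 >= 0x200000 and is rejected, while val = -0x100000 wraps to 0 and
 * is accepted.  Branch displacements (IMM21b, IMM60) are counted in 16-byte
 * bundles, which is why callers divide byte offsets by 16 first.
 */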
206
207 #if USE_BRL
208
209 struct plt_entry {
210 /* Two instruction bundles in PLT. */
211 unsigned char bundle[2][16];
212 };
213
214 static const struct plt_entry ia64_plt_template = {
215 {
216 {
217 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
218 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /* movl gp=TARGET_GP */
219 0x00, 0x00, 0x00, 0x60
220 },
221 {
222 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
223 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* brl.many TARGET_IP */
224 0x08, 0x00, 0x00, 0xc0
225 }
226 }
227 };
228
229 static int
230 patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
231 {
232 if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_gp)
233 && apply_imm60(mod, (struct insn *) (plt->bundle[1] + 2),
234 (target_ip - (int64_t) plt->bundle[1]) / 16))
235 return 1;
236 return 0;
237 }
238
239 unsigned long
240 plt_target (struct plt_entry *plt)
241 {
242 uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];
243 long off;
244
245 b0 = b[0]; b1 = b[1];
246 off = ( ((b1 & 0x00fffff000000000UL) >> 36) /* imm20b -> bit 0 */
247 | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36) /* imm39 -> bit 20 */
248 | ((b1 & 0x0800000000000000UL) << 0)); /* i -> bit 59 */
249 return (long) plt->bundle[1] + 16*off;
250 }
251
252 #else /* !USE_BRL */
253
254 struct plt_entry {
255 /* Three instruction bundles in PLT. */
256 unsigned char bundle[3][16];
257 };
258
259 static const struct plt_entry ia64_plt_template = {
260 {
261 {
262 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* movl r16=TARGET_IP */
264 0x02, 0x00, 0x00, 0x60
265 },
266 {
267 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /* movl gp=TARGET_GP */
269 0x00, 0x00, 0x00, 0x60
270 },
271 {
272 0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MIB] nop.m 0 */
273 0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /* mov b6=r16 */
274 0x60, 0x00, 0x80, 0x00 /* br.few b6 */
275 }
276 }
277 };
278
279 static int
280 patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
281 {
282 if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_ip)
283 && apply_imm64(mod, (struct insn *) (plt->bundle[1] + 2), target_gp))
284 return 1;
285 return 0;
286 }
287
288 unsigned long
289 plt_target (struct plt_entry *plt)
290 {
291 uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];
292
293 b0 = b[0]; b1 = b[1];
294 return ( ((b1 & 0x000007f000000000) >> 36) /* imm7b -> bit 0 */
295 | ((b1 & 0x07fc000000000000) >> 43) /* imm9d -> bit 7 */
296 | ((b1 & 0x0003e00000000000) >> 29) /* imm5c -> bit 16 */
297 | ((b1 & 0x0000100000000000) >> 23) /* ic -> bit 21 */
298 | ((b0 >> 46) << 22) | ((b1 & 0x7fffff) << 40) /* imm41 -> bit 22 */
299 | ((b1 & 0x0800000000000000) << 4)); /* i -> bit 63 */
300 }
301
302 #endif /* !USE_BRL */
303
304 void *
305 module_alloc (unsigned long size)
306 {
307 if (!size)
308 return NULL;
309 return vmalloc(size);
310 }
311
312 void
313 module_free (struct module *mod, void *module_region)
314 {
315 if (mod && mod->arch.init_unw_table &&
316 module_region == mod->module_init) {
317 unw_remove_unwind_table(mod->arch.init_unw_table);
318 mod->arch.init_unw_table = NULL;
319 }
320 vfree(module_region);
321 }
322
323 /* Have we already seen one of these relocations? */
324 /* FIXME: we could look in other sections, too --RR */
325 static int
326 duplicate_reloc (const Elf64_Rela *rela, unsigned int num)
327 {
328 unsigned int i;
329
330 for (i = 0; i < num; i++) {
331 if (rela[i].r_info == rela[num].r_info && rela[i].r_addend == rela[num].r_addend)
332 return 1;
333 }
334 return 0;
335 }
336
337 /* Count how many GOT entries we may need */
338 static unsigned int
339 count_gots (const Elf64_Rela *rela, unsigned int num)
340 {
341 unsigned int i, ret = 0;
342
343 /* Sure, this is order(n^2), but it's usually short, and not
344 time critical */
345 for (i = 0; i < num; i++) {
346 switch (ELF64_R_TYPE(rela[i].r_info)) {
347 case R_IA64_LTOFF22:
348 case R_IA64_LTOFF22X:
349 case R_IA64_LTOFF64I:
350 case R_IA64_LTOFF_FPTR22:
351 case R_IA64_LTOFF_FPTR64I:
352 case R_IA64_LTOFF_FPTR32MSB:
353 case R_IA64_LTOFF_FPTR32LSB:
354 case R_IA64_LTOFF_FPTR64MSB:
355 case R_IA64_LTOFF_FPTR64LSB:
356 if (!duplicate_reloc(rela, i))
357 ret++;
358 break;
359 }
360 }
361 return ret;
362 }
363
364 /* Count how many PLT entries we may need */
365 static unsigned int
366 count_plts (const Elf64_Rela *rela, unsigned int num)
367 {
368 unsigned int i, ret = 0;
369
370 /* Sure, this is order(n^2), but it's usually short, and not
371 time critical */
372 for (i = 0; i < num; i++) {
373 switch (ELF64_R_TYPE(rela[i].r_info)) {
374 case R_IA64_PCREL21B:
375 case R_IA64_PLTOFF22:
376 case R_IA64_PLTOFF64I:
377 case R_IA64_PLTOFF64MSB:
378 case R_IA64_PLTOFF64LSB:
379 case R_IA64_IPLTMSB:
380 case R_IA64_IPLTLSB:
381 if (!duplicate_reloc(rela, i))
382 ret++;
383 break;
384 }
385 }
386 return ret;
387 }
388
389 /* We need to create a function descriptor for any internal function
390 that is referenced. */
391 static unsigned int
392 count_fdescs (const Elf64_Rela *rela, unsigned int num)
393 {
394 unsigned int i, ret = 0;
395
396 /* Sure, this is order(n^2), but it's usually short, and not time critical. */
397 for (i = 0; i < num; i++) {
398 switch (ELF64_R_TYPE(rela[i].r_info)) {
399 case R_IA64_FPTR64I:
400 case R_IA64_FPTR32LSB:
401 case R_IA64_FPTR32MSB:
402 case R_IA64_FPTR64LSB:
403 case R_IA64_FPTR64MSB:
404 case R_IA64_LTOFF_FPTR22:
405 case R_IA64_LTOFF_FPTR32LSB:
406 case R_IA64_LTOFF_FPTR32MSB:
407 case R_IA64_LTOFF_FPTR64I:
408 case R_IA64_LTOFF_FPTR64LSB:
409 case R_IA64_LTOFF_FPTR64MSB:
410 case R_IA64_IPLTMSB:
411 case R_IA64_IPLTLSB:
412 /*
413 * Jumps to static functions sometimes go straight to their
414 * offset. Of course, that may not be possible if the jump is
415 * from init -> core or vice versa, so we need to generate an
416 * FDESC (and PLT etc) for that.
417 */
418 case R_IA64_PCREL21B:
419 if (!duplicate_reloc(rela, i))
420 ret++;
421 break;
422 }
423 }
424 return ret;
425 }
426
427 int
428 module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
429 struct module *mod)
430 {
431 unsigned long core_plts = 0, init_plts = 0, gots = 0, fdescs = 0;
432 Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
433
434 /*
435 * To store the PLTs and function descriptors, we size the .core.plt section for
436 * core module code and the .init.plt section for initialization code.
437 */
438 for (s = sechdrs; s < sechdrs_end; ++s)
439 if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
440 mod->arch.core_plt = s;
441 else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
442 mod->arch.init_plt = s;
443 else if (strcmp(".got", secstrings + s->sh_name) == 0)
444 mod->arch.got = s;
445 else if (strcmp(".opd", secstrings + s->sh_name) == 0)
446 mod->arch.opd = s;
447 else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
448 mod->arch.unwind = s;
449 #ifdef CONFIG_PARAVIRT
450 else if (strcmp(".paravirt_bundles",
451 secstrings + s->sh_name) == 0)
452 mod->arch.paravirt_bundles = s;
453 else if (strcmp(".paravirt_insts",
454 secstrings + s->sh_name) == 0)
455 mod->arch.paravirt_insts = s;
456 #endif
457
458 if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
459 printk(KERN_ERR "%s: sections missing\n", mod->name);
460 return -ENOEXEC;
461 }
462
463 /* GOT and PLTs can occur in any relocated section... */
464 for (s = sechdrs + 1; s < sechdrs_end; ++s) {
465 const Elf64_Rela *rels = (void *)ehdr + s->sh_offset;
466 unsigned long numrels = s->sh_size/sizeof(Elf64_Rela);
467
468 if (s->sh_type != SHT_RELA)
469 continue;
470
471 gots += count_gots(rels, numrels);
472 fdescs += count_fdescs(rels, numrels);
473 if (strstr(secstrings + s->sh_name, ".init"))
474 init_plts += count_plts(rels, numrels);
475 else
476 core_plts += count_plts(rels, numrels);
477 }
478
479 mod->arch.core_plt->sh_type = SHT_NOBITS;
480 mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
481 mod->arch.core_plt->sh_addralign = 16;
482 mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry);
483 mod->arch.init_plt->sh_type = SHT_NOBITS;
484 mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
485 mod->arch.init_plt->sh_addralign = 16;
486 mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry);
487 mod->arch.got->sh_type = SHT_NOBITS;
488 mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC;
489 mod->arch.got->sh_addralign = 8;
490 mod->arch.got->sh_size = gots * sizeof(struct got_entry);
491 mod->arch.opd->sh_type = SHT_NOBITS;
492 mod->arch.opd->sh_flags = SHF_ALLOC;
493 mod->arch.opd->sh_addralign = 8;
494 mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
495 DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
496 __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
497 mod->arch.got->sh_size, mod->arch.opd->sh_size);
498 return 0;
499 }
500
501 static inline int
502 in_init (const struct module *mod, uint64_t addr)
503 {
504 return addr - (uint64_t) mod->module_init < mod->init_size;
505 }
506
507 static inline int
508 in_core (const struct module *mod, uint64_t addr)
509 {
510 return addr - (uint64_t) mod->module_core < mod->core_size;
511 }
512
513 static inline int
514 is_internal (const struct module *mod, uint64_t value)
515 {
516 return in_init(mod, value) || in_core(mod, value);
517 }
518
519 /*
520 * Get gp-relative offset for the linkage-table entry of VALUE.
521 */
522 static uint64_t
523 get_ltoff (struct module *mod, uint64_t value, int *okp)
524 {
525 struct got_entry *got, *e;
526
527 if (!*okp)
528 return 0;
529
530 got = (void *) mod->arch.got->sh_addr;
531 for (e = got; e < got + mod->arch.next_got_entry; ++e)
532 if (e->val == value)
533 goto found;
534
535 /* Not enough GOT entries? */
536 if (e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size))
537 BUG();
538
539 e->val = value;
540 ++mod->arch.next_got_entry;
541 found:
542 return (uint64_t) e - mod->arch.gp;
543 }
544
545 static inline int
546 gp_addressable (struct module *mod, uint64_t value)
547 {
548 return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF;
549 }
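/*
 * This is the same wraparound trick as the immediate range checks above: it
 * is true exactly when value lies in [gp - MAX_LTOFF/2, gp + MAX_LTOFF/2),
 * the 4MB window that a 22-bit gp-relative immediate can reach.
 */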
550
551 /* Get PC-relative PLT entry for this value. Returns 0 on failure. */
552 static uint64_t
553 get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
554 {
555 struct plt_entry *plt, *plt_end;
556 uint64_t target_ip, target_gp;
557
558 if (!*okp)
559 return 0;
560
561 if (in_init(mod, (uint64_t) insn)) {
562 plt = (void *) mod->arch.init_plt->sh_addr;
563 plt_end = (void *) plt + mod->arch.init_plt->sh_size;
564 } else {
565 plt = (void *) mod->arch.core_plt->sh_addr;
566 plt_end = (void *) plt + mod->arch.core_plt->sh_size;
567 }
568
569 /* "value" is a pointer to a function-descriptor; fetch the target ip/gp from it: */
570 target_ip = ((uint64_t *) value)[0];
571 target_gp = ((uint64_t *) value)[1];
572
573 /* Look for existing PLT entry. */
574 while (plt->bundle[0][0]) {
575 if (plt_target(plt) == target_ip)
576 goto found;
577 if (++plt >= plt_end)
578 BUG();
579 }
580 *plt = ia64_plt_template;
581 if (!patch_plt(mod, plt, target_ip, target_gp)) {
582 *okp = 0;
583 return 0;
584 }
585 #if ARCH_MODULE_DEBUG
586 if (plt_target(plt) != target_ip) {
587 printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
588 __func__, target_ip, plt_target(plt));
589 *okp = 0;
590 return 0;
591 }
592 #endif
593 found:
594 return (uint64_t) plt;
595 }
596
597 /* Get function descriptor for VALUE. */
598 static uint64_t
599 get_fdesc (struct module *mod, uint64_t value, int *okp)
600 {
601 struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr;
602
603 if (!*okp)
604 return 0;
605
606 if (!value) {
607 printk(KERN_ERR "%s: fdesc for zero requested!\n", mod->name);
608 return 0;
609 }
610
611 if (!is_internal(mod, value))
612 /*
613 * If it's not a module-local entry-point, "value" already points to a
614 * function-descriptor.
615 */
616 return value;
617
618 /* Look for existing function descriptor. */
619 while (fdesc->ip) {
620 if (fdesc->ip == value)
621 return (uint64_t)fdesc;
622 if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size)
623 BUG();
624 }
625
626 /* Create new one */
627 fdesc->ip = value;
628 fdesc->gp = mod->arch.gp;
629 return (uint64_t) fdesc;
630 }
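/*
 * Background: in the ia64 software conventions a function pointer does not
 * point at code but at a function descriptor, a { entry ip, gp } pair.
 * Addresses taken of module-local functions therefore get a descriptor
 * allocated in the module's .opd section with gp set to the module's own gp;
 * values that are not module-local already refer to a descriptor and are
 * returned unchanged.
 */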
631
632 static inline int
633 do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
634 Elf64_Shdr *sec, void *location)
635 {
636 enum reloc_target_format format = (r_type >> FORMAT_SHIFT) & FORMAT_MASK;
637 enum reloc_value_formula formula = (r_type >> VALUE_SHIFT) & VALUE_MASK;
638 uint64_t val;
639 int ok = 1;
640
641 val = sym->st_value + addend;
642
643 switch (formula) {
644 case RV_SEGREL: /* segment base is arbitrarily chosen to be 0 for kernel modules */
645 case RV_DIRECT:
646 break;
647
648 case RV_GPREL: val -= mod->arch.gp; break;
649 case RV_LTREL: val = get_ltoff(mod, val, &ok); break;
650 case RV_PLTREL: val = get_plt(mod, location, val, &ok); break;
651 case RV_FPTR: val = get_fdesc(mod, val, &ok); break;
652 case RV_SECREL: val -= sec->sh_addr; break;
653 case RV_LTREL_FPTR: val = get_ltoff(mod, get_fdesc(mod, val, &ok), &ok); break;
654
655 case RV_PCREL:
656 switch (r_type) {
657 case R_IA64_PCREL21B:
658 if ((in_init(mod, val) && in_core(mod, (uint64_t)location)) ||
659 (in_core(mod, val) && in_init(mod, (uint64_t)location))) {
660 /*
661 * The init section may have been allocated far away from core;
662 * if the branch won't reach, allocate a PLT entry for it.
663 */
664 uint64_t delta = ((int64_t)val - (int64_t)location) / 16;
665 if (delta + (1 << 20) >= (1 << 21)) {
666 val = get_fdesc(mod, val, &ok);
667 val = get_plt(mod, location, val, &ok);
668 }
669 } else if (!is_internal(mod, val))
670 val = get_plt(mod, location, val, &ok);
671 /* FALL THROUGH */
672 default:
673 val -= bundle(location);
674 break;
675
676 case R_IA64_PCREL32MSB:
677 case R_IA64_PCREL32LSB:
678 case R_IA64_PCREL64MSB:
679 case R_IA64_PCREL64LSB:
680 val -= (uint64_t) location;
681 break;
682
683 }
684 switch (r_type) {
685 case R_IA64_PCREL60B: format = RF_INSN60; break;
686 case R_IA64_PCREL21B: format = RF_INSN21B; break;
687 case R_IA64_PCREL21M: format = RF_INSN21M; break;
688 case R_IA64_PCREL21F: format = RF_INSN21F; break;
689 default: break;
690 }
691 break;
692
693 case RV_BDREL:
694 val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
695 break;
696
697 case RV_LTV:
698 /* can link-time value relocs happen here? */
699 BUG();
700 break;
701
702 case RV_PCREL2:
703 if (r_type == R_IA64_PCREL21BI) {
704 if (!is_internal(mod, val)) {
705 printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n",
706 __func__, reloc_name[r_type], val);
707 return -ENOEXEC;
708 }
709 format = RF_INSN21B;
710 }
711 val -= bundle(location);
712 break;
713
714 case RV_SPECIAL:
715 switch (r_type) {
716 case R_IA64_IPLTMSB:
717 case R_IA64_IPLTLSB:
718 val = get_fdesc(mod, get_plt(mod, location, val, &ok), &ok);
719 format = RF_64LSB;
720 if (r_type == R_IA64_IPLTMSB)
721 format = RF_64MSB;
722 break;
723
724 case R_IA64_SUB:
725 val = addend - sym->st_value;
726 format = RF_INSN64;
727 break;
728
729 case R_IA64_LTOFF22X:
730 if (gp_addressable(mod, val))
731 val -= mod->arch.gp;
732 else
733 val = get_ltoff(mod, val, &ok);
734 format = RF_INSN22;
735 break;
736
737 case R_IA64_LDXMOV:
738 if (gp_addressable(mod, val)) {
739 /* turn "ld8" into "mov": */
740 DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
741 ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
742 }
743 return 0;
744
745 default:
746 if (reloc_name[r_type])
747 printk(KERN_ERR "%s: special reloc %s not supported",
748 mod->name, reloc_name[r_type]);
749 else
750 printk(KERN_ERR "%s: unknown special reloc %x\n",
751 mod->name, r_type);
752 return -ENOEXEC;
753 }
754 break;
755
756 case RV_TPREL:
757 case RV_LTREL_TPREL:
758 case RV_DTPMOD:
759 case RV_LTREL_DTPMOD:
760 case RV_DTPREL:
761 case RV_LTREL_DTPREL:
762 printk(KERN_ERR "%s: %s reloc not supported\n",
763 mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?");
764 return -ENOEXEC;
765
766 default:
767 printk(KERN_ERR "%s: unknown reloc %x\n", mod->name, r_type);
768 return -ENOEXEC;
769 }
770
771 if (!ok)
772 return -ENOEXEC;
773
774 DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val,
775 reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);
776
777 switch (format) {
778 case RF_INSN21B: ok = apply_imm21b(mod, location, (int64_t) val / 16); break;
779 case RF_INSN22: ok = apply_imm22(mod, location, val); break;
780 case RF_INSN64: ok = apply_imm64(mod, location, val); break;
781 case RF_INSN60: ok = apply_imm60(mod, location, (int64_t) val / 16); break;
782 case RF_32LSB: put_unaligned(val, (uint32_t *) location); break;
783 case RF_64LSB: put_unaligned(val, (uint64_t *) location); break;
784 case RF_32MSB: /* ia64 Linux is little-endian... */
785 case RF_64MSB: /* ia64 Linux is little-endian... */
786 case RF_INSN14: /* must be within-module, i.e., resolved by "ld -r" */
787 case RF_INSN21M: /* must be within-module, i.e., resolved by "ld -r" */
788 case RF_INSN21F: /* must be within-module, i.e., resolved by "ld -r" */
789 printk(KERN_ERR "%s: format %u needed by %s reloc is not supported\n",
790 mod->name, format, reloc_name[r_type] ? reloc_name[r_type] : "?");
791 return -ENOEXEC;
792
793 default:
794 printk(KERN_ERR "%s: relocation %s resulted in unknown format %u\n",
795 mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?", format);
796 return -ENOEXEC;
797 }
798 return ok ? 0 : -ENOEXEC;
799 }
800
801 int
802 apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
803 unsigned int relsec, struct module *mod)
804 {
805 unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela);
806 Elf64_Rela *rela = (void *) sechdrs[relsec].sh_addr;
807 Elf64_Shdr *target_sec;
808 int ret;
809
810 DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__,
811 relsec, n, sechdrs[relsec].sh_info);
812
813 target_sec = sechdrs + sechdrs[relsec].sh_info;
814
815 if (target_sec->sh_entsize == ~0UL)
816 /*
817 * If target section wasn't allocated, we don't need to relocate it.
818 * Happens, e.g., for debug sections.
819 */
820 return 0;
821
822 if (!mod->arch.gp) {
823 /*
824 * XXX Should have an arch-hook for running this after final section
825 * addresses have been selected...
826 */
827 uint64_t gp;
828 if (mod->core_size > MAX_LTOFF)
829 /*
830 * This takes advantage of the fact that ARCH_SHF_SMALL sections get
831 * allocated at the end of the module.
832 */
833 gp = mod->core_size - MAX_LTOFF / 2;
834 else
835 gp = mod->core_size / 2;
836 gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
837 mod->arch.gp = gp;
838 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
839 }
840
841 for (i = 0; i < n; i++) {
842 ret = do_reloc(mod, ELF64_R_TYPE(rela[i].r_info),
843 ((Elf64_Sym *) sechdrs[symindex].sh_addr
844 + ELF64_R_SYM(rela[i].r_info)),
845 rela[i].r_addend, target_sec,
846 (void *) target_sec->sh_addr + rela[i].r_offset);
847 if (ret < 0)
848 return ret;
849 }
850 return 0;
851 }
852
853 int
854 apply_relocate (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
855 unsigned int relsec, struct module *mod)
856 {
857 printk(KERN_ERR "module %s: REL relocs in section %u unsupported\n", mod->name, relsec);
858 return -ENOEXEC;
859 }
860
861 /*
862 * Modules contain a single unwind table which covers both the core and the init text
863 * sections but since the two are not contiguous, we need to split this table up such that
864 * we can register (and unregister) each "segment" separately. Fortunately, this sounds
865 * more complicated than it really is.
866 */
867 static void
868 register_unwind_table (struct module *mod)
869 {
870 struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
871 struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
872 struct unw_table_entry tmp, *e1, *e2, *core, *init;
873 unsigned long num_init = 0, num_core = 0;
874
875 /* First, count how many init and core unwind-table entries there are. */
876 for (e1 = start; e1 < end; ++e1)
877 if (in_init(mod, e1->start_offset))
878 ++num_init;
879 else
880 ++num_core;
881 /*
882 * Second, sort the table such that all unwind-table entries for the init and core
883 * text sections are nicely separated. We do this with a stupid bubble sort
884 * (unwind tables don't get ridiculously huge).
885 */
886 for (e1 = start; e1 < end; ++e1) {
887 for (e2 = e1 + 1; e2 < end; ++e2) {
888 if (e2->start_offset < e1->start_offset) {
889 tmp = *e1;
890 *e1 = *e2;
891 *e2 = tmp;
892 }
893 }
894 }
895 /*
896 * Third, locate the init and core segments in the unwind table:
897 */
898 if (in_init(mod, start->start_offset)) {
899 init = start;
900 core = start + num_init;
901 } else {
902 core = start;
903 init = start + num_core;
904 }
905
906 DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__,
907 mod->name, mod->arch.gp, num_init, num_core);
908
909 /*
910 * Fourth, register both tables (if not empty).
911 */
912 if (num_core > 0) {
913 mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
914 core, core + num_core);
915 DEBUGP("%s: core: handle=%p [%p-%p)\n", __func__,
916 mod->arch.core_unw_table, core, core + num_core);
917 }
918 if (num_init > 0) {
919 mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
920 init, init + num_init);
921 DEBUGP("%s: init: handle=%p [%p-%p)\n", __func__,
922 mod->arch.init_unw_table, init, init + num_init);
923 }
924 }
925
926 int
927 module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
928 {
929 DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
930 if (mod->arch.unwind)
931 register_unwind_table(mod);
932 #ifdef CONFIG_PARAVIRT
933 if (mod->arch.paravirt_bundles) {
934 struct paravirt_patch_site_bundle *start =
935 (struct paravirt_patch_site_bundle *)
936 mod->arch.paravirt_bundles->sh_addr;
937 struct paravirt_patch_site_bundle *end =
938 (struct paravirt_patch_site_bundle *)
939 (mod->arch.paravirt_bundles->sh_addr +
940 mod->arch.paravirt_bundles->sh_size);
941
942 paravirt_patch_apply_bundle(start, end);
943 }
944 if (mod->arch.paravirt_insts) {
945 struct paravirt_patch_site_inst *start =
946 (struct paravirt_patch_site_inst *)
947 mod->arch.paravirt_insts->sh_addr;
948 struct paravirt_patch_site_inst *end =
949 (struct paravirt_patch_site_inst *)
950 (mod->arch.paravirt_insts->sh_addr +
951 mod->arch.paravirt_insts->sh_size);
952
953 paravirt_patch_apply_inst(start, end);
954 }
955 #endif
956 return 0;
957 }
958
959 void
960 module_arch_cleanup (struct module *mod)
961 {
962 if (mod->arch.init_unw_table)
963 unw_remove_unwind_table(mod->arch.init_unw_table);
964 if (mod->arch.core_unw_table)
965 unw_remove_unwind_table(mod->arch.core_unw_table);
966 }