Commit | Line | Data |
---|---|---|
bd353861 MF |
1 | /* |
2 | * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org> | |
3 | * | |
4 | * This file is subject to the terms and conditions of the GNU General Public | |
5 | * License. See the file "COPYING" in the main directory of this archive | |
6 | * for more details. | |
7 | * | |
8 | * This is an implementation of a DWARF unwinder. Its main purpose is | |
9 | * for generating stacktrace information. Based on the DWARF 3 | |
10 | * specification from http://www.dwarfstd.org. | |
11 | * | |
12 | * TODO: | |
13 | * - DWARF64 doesn't work. | |
14 | */ | |
15 | ||
16 | /* #define DEBUG */ | |
17 | #include <linux/kernel.h> | |
18 | #include <linux/io.h> | |
19 | #include <linux/list.h> | |
20 | #include <linux/mm.h> | |
21 | #include <asm/dwarf.h> | |
22 | #include <asm/unwinder.h> | |
23 | #include <asm/sections.h> | |
24 | #include <asm-generic/unaligned.h> | |
25 | #include <asm/dwarf.h> | |
26 | #include <asm/stacktrace.h> | |
27 | ||
/* All CIEs parsed out of .eh_frame, in section order; guarded by dwarf_cie_lock. */
static LIST_HEAD(dwarf_cie_list);
DEFINE_SPINLOCK(dwarf_cie_lock);

/* All FDEs parsed out of .eh_frame; guarded by dwarf_fde_lock. */
static LIST_HEAD(dwarf_fde_list);
DEFINE_SPINLOCK(dwarf_fde_lock);

/*
 * The most recently looked-up CIE. Consecutive FDEs usually share a
 * CIE, so caching the last hit lets dwarf_lookup_cie() skip the list
 * walk. Guarded by dwarf_cie_lock.
 */
static struct dwarf_cie *cached_cie;
35 | ||
36 | /* | |
37 | * Figure out whether we need to allocate some dwarf registers. If dwarf | |
38 | * registers have already been allocated then we may need to realloc | |
39 | * them. "reg" is a register number that we need to be able to access | |
40 | * after this call. | |
41 | * | |
42 | * Register numbers start at zero, therefore we need to allocate space | |
43 | * for "reg" + 1 registers. | |
44 | */ | |
45 | static void dwarf_frame_alloc_regs(struct dwarf_frame *frame, | |
46 | unsigned int reg) | |
47 | { | |
48 | struct dwarf_reg *regs; | |
49 | unsigned int num_regs = reg + 1; | |
50 | size_t new_size; | |
51 | size_t old_size; | |
52 | ||
53 | new_size = num_regs * sizeof(*regs); | |
54 | old_size = frame->num_regs * sizeof(*regs); | |
55 | ||
56 | /* Fast path: don't allocate any regs if we've already got enough. */ | |
57 | if (frame->num_regs >= num_regs) | |
58 | return; | |
59 | ||
60 | regs = kzalloc(new_size, GFP_KERNEL); | |
61 | if (!regs) { | |
62 | printk(KERN_WARNING "Unable to allocate DWARF registers\n"); | |
63 | /* | |
64 | * Let's just bomb hard here, we have no way to | |
65 | * gracefully recover. | |
66 | */ | |
67 | BUG(); | |
68 | } | |
69 | ||
70 | if (frame->regs) { | |
71 | memcpy(regs, frame->regs, old_size); | |
72 | kfree(frame->regs); | |
73 | } | |
74 | ||
75 | frame->regs = regs; | |
76 | frame->num_regs = num_regs; | |
77 | } | |
78 | ||
/**
 * dwarf_read_addr - read one native-address-sized datum
 * @src: source address of data
 * @dst: destination address to store the data to
 *
 * Copy a 32-bit value from @src to @dst using unaligned-safe
 * accessors, since neither pointer is guaranteed to be aligned.
 * Return the number of bytes consumed, i.e. the size of a native
 * address.
 */
static inline int dwarf_read_addr(void *src, void *dst)
{
	__put_unaligned_cpu32(__get_unaligned_cpu32(src), dst);

	return sizeof(unsigned long *);
}
97 | ||
/**
 * dwarf_read_uleb128 - read unsigned LEB128 data
 * @addr: the address where the ULEB128 data is stored
 * @ret: address to store the result
 *
 * Decode an unsigned LEB128 encoded datum. The algorithm is taken
 * from Appendix C of the DWARF 3 spec. For information on the
 * encodings refer to section "7.6 - Variable Length Data". Return
 * the number of bytes read.
 */
static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
{
	unsigned int value = 0;
	unsigned long num_bytes = 0;
	int shift = 0;
	unsigned char byte;

	/* Accumulate 7 payload bits per byte until the high bit clears. */
	do {
		byte = __raw_readb(addr + num_bytes);
		num_bytes++;

		value |= (byte & 0x7f) << shift;
		shift += 7;
	} while (byte & 0x80);

	*ret = value;

	return num_bytes;
}
134 | ||
/**
 * dwarf_read_leb128 - read signed LEB128 data
 * @addr: the address of the LEB128 encoded data
 * @ret: address to store the result
 *
 * Decode signed LEB128 data. The algorithm is taken from Appendix
 * C of the DWARF 3 spec. Return the number of bytes read.
 */
static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
{
	unsigned char byte;
	int result, shift;
	int num_bits;
	int count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		result |= (byte & 0x7f) << shift;
		shift += 7;
		count++;

		if (!(byte & 0x80))
			break;
	}

	/* The number of bits in a signed integer. */
	num_bits = 8 * sizeof(result);

	/*
	 * Sign-extend if the final byte had its sign bit (0x40) set.
	 * Build the extension mask by shifting an unsigned all-ones
	 * value; left-shifting the negative constant -1 (as the old
	 * code did) is undefined behaviour in C.
	 */
	if ((shift < num_bits) && (byte & 0x40))
		result |= (int)(~0U << shift);

	*ret = result;

	return count;
}
175 | ||
/**
 * dwarf_read_encoded_value - return the decoded value at @addr
 * @addr: the address of the encoded value
 * @val: where to write the decoded value
 * @encoding: the encoding with which we can decode @addr
 *
 * GCC emits encoded address in the .eh_frame FDE entries. Decode
 * the value at @addr using @encoding. The decoded value is written
 * to @val and the number of bytes read is returned.
 *
 * Only DW_EH_PE_absptr/DW_EH_PE_pcrel bases and 4-byte data are
 * handled; anything else hits BUG().
 */
static int dwarf_read_encoded_value(char *addr, unsigned long *val,
				    char encoding)
{
	unsigned long decoded_addr = 0;
	int count = 0;

	/* Upper nibble selects the base the value is relative to. */
	switch (encoding & 0x70) {
	case DW_EH_PE_absptr:
		break;
	case DW_EH_PE_pcrel:
		/* pc-relative: relative to where the value was read from. */
		decoded_addr = (unsigned long)addr;
		break;
	default:
		pr_debug("encoding=0x%x\n", (encoding & 0x70));
		BUG();
	}

	/* A zero data format means "natural size"; treat it as 4 bytes. */
	if ((encoding & 0x07) == 0x00)
		encoding |= DW_EH_PE_udata4;

	switch (encoding & 0x0f) {
	case DW_EH_PE_sdata4:
	case DW_EH_PE_udata4:
		/*
		 * NOTE(review): sdata4 is read with the unsigned accessor
		 * here, i.e. no sign extension — presumably fine on a
		 * 32-bit target where the addition wraps identically;
		 * confirm before reusing on 64-bit.
		 */
		count += 4;
		decoded_addr += __get_unaligned_cpu32(addr);
		__raw_writel(decoded_addr, val);
		break;
	default:
		pr_debug("encoding=0x%x\n", encoding);
		BUG();
	}

	return count;
}
220 | ||
221 | /** | |
222 | * dwarf_entry_len - return the length of an FDE or CIE | |
223 | * @addr: the address of the entry | |
224 | * @len: the length of the entry | |
225 | * | |
226 | * Read the initial_length field of the entry and store the size of | |
227 | * the entry in @len. We return the number of bytes read. Return a | |
228 | * count of 0 on error. | |
229 | */ | |
230 | static inline int dwarf_entry_len(char *addr, unsigned long *len) | |
231 | { | |
232 | u32 initial_len; | |
233 | int count; | |
234 | ||
235 | initial_len = __get_unaligned_cpu32(addr); | |
236 | count = 4; | |
237 | ||
238 | /* | |
239 | * An initial length field value in the range DW_LEN_EXT_LO - | |
240 | * DW_LEN_EXT_HI indicates an extension, and should not be | |
241 | * interpreted as a length. The only extension that we currently | |
242 | * understand is the use of DWARF64 addresses. | |
243 | */ | |
244 | if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) { | |
245 | /* | |
246 | * The 64-bit length field immediately follows the | |
247 | * compulsory 32-bit length field. | |
248 | */ | |
249 | if (initial_len == DW_EXT_DWARF64) { | |
250 | *len = __get_unaligned_cpu64(addr + 4); | |
251 | count = 12; | |
252 | } else { | |
253 | printk(KERN_WARNING "Unknown DWARF extension\n"); | |
254 | count = 0; | |
255 | } | |
256 | } else | |
257 | *len = initial_len; | |
258 | ||
259 | return count; | |
260 | } | |
261 | ||
/**
 * dwarf_lookup_cie - locate the CIE with the given section offset
 * @cie_ptr: the .eh_frame offset (cie_pointer) identifying the CIE
 *
 * Return the matching CIE, or NULL if none is known. The last hit
 * is cached in cached_cie, since consecutive FDEs usually reference
 * the same CIE. Takes dwarf_cie_lock, so callers must not hold it.
 */
static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
{
	struct dwarf_cie *cie, *n;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_cie_lock, flags);

	/*
	 * We've cached the last CIE we looked up because chances are
	 * that the FDE wants this CIE.
	 */
	if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
		cie = cached_cie;
		goto out;
	}

	list_for_each_entry_safe(cie, n, &dwarf_cie_list, link) {
		if (cie->cie_pointer == cie_ptr) {
			cached_cie = cie;
			break;
		}
	}

	/*
	 * Couldn't find the entry in the list: if the loop ran to
	 * completion, &cie->link points back at the list head.
	 */
	if (&cie->link == &dwarf_cie_list)
		cie = NULL;
out:
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
	return cie;
}
296 | ||
/**
 * dwarf_lookup_fde - locate the FDE that covers pc
 * @pc: the program counter
 *
 * Walk the FDE list and return the entry whose
 * [initial_location, initial_location + address_range) range
 * contains @pc, or NULL if no FDE covers it. Takes dwarf_fde_lock.
 */
struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
{
	unsigned long flags;
	struct dwarf_fde *fde, *n;

	spin_lock_irqsave(&dwarf_fde_lock, flags);
	list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) {
		unsigned long start, end;

		start = fde->initial_location;
		end = fde->initial_location + fde->address_range;

		if (pc >= start && pc < end)
			break;
	}

	/*
	 * Couldn't find the entry in the list: if the loop ran to
	 * completion, &fde->link points back at the list head.
	 */
	if (&fde->link == &dwarf_fde_list)
		fde = NULL;

	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return fde;
}
325 | ||
326 | /** | |
327 | * dwarf_cfa_execute_insns - execute instructions to calculate a CFA | |
328 | * @insn_start: address of the first instruction | |
329 | * @insn_end: address of the last instruction | |
330 | * @cie: the CIE for this function | |
331 | * @fde: the FDE for this function | |
332 | * @frame: the instructions calculate the CFA for this frame | |
333 | * @pc: the program counter of the address we're interested in | |
334 | * | |
335 | * Execute the Call Frame instruction sequence starting at | |
336 | * @insn_start and ending at @insn_end. The instructions describe | |
337 | * how to calculate the Canonical Frame Address of a stackframe. | |
338 | * Store the results in @frame. | |
339 | */ | |
340 | static int dwarf_cfa_execute_insns(unsigned char *insn_start, | |
341 | unsigned char *insn_end, | |
342 | struct dwarf_cie *cie, | |
343 | struct dwarf_fde *fde, | |
344 | struct dwarf_frame *frame, | |
345 | unsigned long pc) | |
346 | { | |
347 | unsigned char insn; | |
348 | unsigned char *current_insn; | |
349 | unsigned int count, delta, reg, expr_len, offset; | |
350 | ||
351 | current_insn = insn_start; | |
352 | ||
353 | while (current_insn < insn_end && frame->pc <= pc) { | |
354 | insn = __raw_readb(current_insn++); | |
355 | ||
356 | /* | |
357 | * Firstly, handle the opcodes that embed their operands | |
358 | * in the instructions. | |
359 | */ | |
360 | switch (DW_CFA_opcode(insn)) { | |
361 | case DW_CFA_advance_loc: | |
362 | delta = DW_CFA_operand(insn); | |
363 | delta *= cie->code_alignment_factor; | |
364 | frame->pc += delta; | |
365 | continue; | |
366 | /* NOTREACHED */ | |
367 | case DW_CFA_offset: | |
368 | reg = DW_CFA_operand(insn); | |
369 | count = dwarf_read_uleb128(current_insn, &offset); | |
370 | current_insn += count; | |
371 | offset *= cie->data_alignment_factor; | |
372 | dwarf_frame_alloc_regs(frame, reg); | |
373 | frame->regs[reg].addr = offset; | |
374 | frame->regs[reg].flags |= DWARF_REG_OFFSET; | |
375 | continue; | |
376 | /* NOTREACHED */ | |
377 | case DW_CFA_restore: | |
378 | reg = DW_CFA_operand(insn); | |
379 | continue; | |
380 | /* NOTREACHED */ | |
381 | } | |
382 | ||
383 | /* | |
384 | * Secondly, handle the opcodes that don't embed their | |
385 | * operands in the instruction. | |
386 | */ | |
387 | switch (insn) { | |
388 | case DW_CFA_nop: | |
389 | continue; | |
390 | case DW_CFA_advance_loc1: | |
391 | delta = *current_insn++; | |
392 | frame->pc += delta * cie->code_alignment_factor; | |
393 | break; | |
394 | case DW_CFA_advance_loc2: | |
395 | delta = __get_unaligned_cpu16(current_insn); | |
396 | current_insn += 2; | |
397 | frame->pc += delta * cie->code_alignment_factor; | |
398 | break; | |
399 | case DW_CFA_advance_loc4: | |
400 | delta = __get_unaligned_cpu32(current_insn); | |
401 | current_insn += 4; | |
402 | frame->pc += delta * cie->code_alignment_factor; | |
403 | break; | |
404 | case DW_CFA_offset_extended: | |
405 | count = dwarf_read_uleb128(current_insn, ®); | |
406 | current_insn += count; | |
407 | count = dwarf_read_uleb128(current_insn, &offset); | |
408 | current_insn += count; | |
409 | offset *= cie->data_alignment_factor; | |
410 | break; | |
411 | case DW_CFA_restore_extended: | |
412 | count = dwarf_read_uleb128(current_insn, ®); | |
413 | current_insn += count; | |
414 | break; | |
415 | case DW_CFA_undefined: | |
416 | count = dwarf_read_uleb128(current_insn, ®); | |
417 | current_insn += count; | |
418 | break; | |
419 | case DW_CFA_def_cfa: | |
420 | count = dwarf_read_uleb128(current_insn, | |
421 | &frame->cfa_register); | |
422 | current_insn += count; | |
423 | count = dwarf_read_uleb128(current_insn, | |
424 | &frame->cfa_offset); | |
425 | current_insn += count; | |
426 | ||
427 | frame->flags |= DWARF_FRAME_CFA_REG_OFFSET; | |
428 | break; | |
429 | case DW_CFA_def_cfa_register: | |
430 | count = dwarf_read_uleb128(current_insn, | |
431 | &frame->cfa_register); | |
432 | current_insn += count; | |
433 | frame->flags |= DWARF_FRAME_CFA_REG_OFFSET; | |
434 | break; | |
435 | case DW_CFA_def_cfa_offset: | |
436 | count = dwarf_read_uleb128(current_insn, &offset); | |
437 | current_insn += count; | |
438 | frame->cfa_offset = offset; | |
439 | break; | |
440 | case DW_CFA_def_cfa_expression: | |
441 | count = dwarf_read_uleb128(current_insn, &expr_len); | |
442 | current_insn += count; | |
443 | ||
444 | frame->cfa_expr = current_insn; | |
445 | frame->cfa_expr_len = expr_len; | |
446 | current_insn += expr_len; | |
447 | ||
448 | frame->flags |= DWARF_FRAME_CFA_REG_EXP; | |
449 | break; | |
450 | case DW_CFA_offset_extended_sf: | |
451 | count = dwarf_read_uleb128(current_insn, ®); | |
452 | current_insn += count; | |
453 | count = dwarf_read_leb128(current_insn, &offset); | |
454 | current_insn += count; | |
455 | offset *= cie->data_alignment_factor; | |
456 | dwarf_frame_alloc_regs(frame, reg); | |
457 | frame->regs[reg].flags |= DWARF_REG_OFFSET; | |
458 | frame->regs[reg].addr = offset; | |
459 | break; | |
460 | case DW_CFA_val_offset: | |
461 | count = dwarf_read_uleb128(current_insn, ®); | |
462 | current_insn += count; | |
463 | count = dwarf_read_leb128(current_insn, &offset); | |
464 | offset *= cie->data_alignment_factor; | |
465 | frame->regs[reg].flags |= DWARF_REG_OFFSET; | |
466 | frame->regs[reg].addr = offset; | |
467 | break; | |
468 | default: | |
469 | pr_debug("unhandled DWARF instruction 0x%x\n", insn); | |
470 | break; | |
471 | } | |
472 | } | |
473 | ||
474 | return 0; | |
475 | } | |
476 | ||
477 | /** | |
478 | * dwarf_unwind_stack - recursively unwind the stack | |
479 | * @pc: address of the function to unwind | |
480 | * @prev: struct dwarf_frame of the previous stackframe on the callstack | |
481 | * | |
482 | * Return a struct dwarf_frame representing the most recent frame | |
483 | * on the callstack. Each of the lower (older) stack frames are | |
484 | * linked via the "prev" member. | |
485 | */ | |
486 | struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, | |
487 | struct dwarf_frame *prev) | |
488 | { | |
489 | struct dwarf_frame *frame; | |
490 | struct dwarf_cie *cie; | |
491 | struct dwarf_fde *fde; | |
492 | unsigned long addr; | |
493 | int i, offset; | |
494 | ||
495 | /* | |
496 | * If this is the first invocation of this recursive function we | |
497 | * need get the contents of a physical register to get the CFA | |
498 | * in order to begin the virtual unwinding of the stack. | |
499 | * | |
500 | * The constant DWARF_ARCH_UNWIND_OFFSET is added to the address of | |
501 | * this function because the return address register | |
502 | * (DWARF_ARCH_RA_REG) will probably not be initialised until a | |
503 | * few instructions into the prologue. | |
504 | */ | |
505 | if (!pc && !prev) { | |
506 | pc = (unsigned long)&dwarf_unwind_stack; | |
507 | pc += DWARF_ARCH_UNWIND_OFFSET; | |
508 | } | |
509 | ||
510 | frame = kzalloc(sizeof(*frame), GFP_KERNEL); | |
511 | if (!frame) | |
512 | return NULL; | |
513 | ||
514 | frame->prev = prev; | |
515 | ||
516 | fde = dwarf_lookup_fde(pc); | |
517 | if (!fde) { | |
518 | /* | |
519 | * This is our normal exit path - the one that stops the | |
520 | * recursion. There's two reasons why we might exit | |
521 | * here, | |
522 | * | |
523 | * a) pc has no asscociated DWARF frame info and so | |
524 | * we don't know how to unwind this frame. This is | |
525 | * usually the case when we're trying to unwind a | |
526 | * frame that was called from some assembly code | |
527 | * that has no DWARF info, e.g. syscalls. | |
528 | * | |
529 | * b) the DEBUG info for pc is bogus. There's | |
530 | * really no way to distinguish this case from the | |
531 | * case above, which sucks because we could print a | |
532 | * warning here. | |
533 | */ | |
534 | return NULL; | |
535 | } | |
536 | ||
537 | cie = dwarf_lookup_cie(fde->cie_pointer); | |
538 | ||
539 | frame->pc = fde->initial_location; | |
540 | ||
541 | /* CIE initial instructions */ | |
542 | dwarf_cfa_execute_insns(cie->initial_instructions, | |
543 | cie->instructions_end, cie, fde, frame, pc); | |
544 | ||
545 | /* FDE instructions */ | |
546 | dwarf_cfa_execute_insns(fde->instructions, fde->end, cie, | |
547 | fde, frame, pc); | |
548 | ||
549 | /* Calculate the CFA */ | |
550 | switch (frame->flags) { | |
551 | case DWARF_FRAME_CFA_REG_OFFSET: | |
552 | if (prev) { | |
553 | BUG_ON(!prev->regs[frame->cfa_register].flags); | |
554 | ||
555 | addr = prev->cfa; | |
556 | addr += prev->regs[frame->cfa_register].addr; | |
557 | frame->cfa = __raw_readl(addr); | |
558 | ||
559 | } else { | |
560 | /* | |
561 | * Again, this is the first invocation of this | |
562 | * recurisve function. We need to physically | |
563 | * read the contents of a register in order to | |
564 | * get the Canonical Frame Address for this | |
565 | * function. | |
566 | */ | |
567 | frame->cfa = dwarf_read_arch_reg(frame->cfa_register); | |
568 | } | |
569 | ||
570 | frame->cfa += frame->cfa_offset; | |
571 | break; | |
572 | default: | |
573 | BUG(); | |
574 | } | |
575 | ||
576 | /* If we haven't seen the return address reg, we're screwed. */ | |
577 | BUG_ON(!frame->regs[DWARF_ARCH_RA_REG].flags); | |
578 | ||
579 | for (i = 0; i <= frame->num_regs; i++) { | |
580 | struct dwarf_reg *reg = &frame->regs[i]; | |
581 | ||
582 | if (!reg->flags) | |
583 | continue; | |
584 | ||
585 | offset = reg->addr; | |
586 | offset += frame->cfa; | |
587 | } | |
588 | ||
589 | addr = frame->cfa + frame->regs[DWARF_ARCH_RA_REG].addr; | |
590 | frame->return_addr = __raw_readl(addr); | |
591 | ||
592 | frame->next = dwarf_unwind_stack(frame->return_addr, frame); | |
593 | return frame; | |
594 | } | |
595 | ||
/*
 * dwarf_parse_cie - parse one Common Information Entry out of .eh_frame
 * @entry: start of the raw CIE (used as its identifying offset)
 * @p: cursor positioned just past the length/ID fields
 * @len: length of the entry as read from its initial_length field
 * @end: one past the last byte of the entry
 *
 * Allocate a struct dwarf_cie, decode the version, augmentation
 * string, alignment factors and return-address column, and append it
 * to dwarf_cie_list. Returns 0 on success or -ENOMEM.
 */
static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
			   unsigned char *end)
{
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;

	cie = kzalloc(sizeof(*cie), GFP_KERNEL);
	if (!cie)
		return -ENOMEM;

	cie->length = len;

	/*
	 * Record the offset into the .eh_frame section
	 * for this CIE. It allows this CIE to be
	 * quickly and easily looked up from the
	 * corresponding FDE.
	 */
	cie->cie_pointer = (unsigned long)entry;

	/* Only CIE version 1 is handled here. */
	cie->version = *(char *)p++;
	BUG_ON(cie->version != 1);

	/* NUL-terminated augmentation string follows the version byte. */
	cie->augmentation = p;
	p += strlen(cie->augmentation) + 1;

	count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
	p += count;

	count = dwarf_read_leb128(p, &cie->data_alignment_factor);
	p += count;

	/*
	 * Which column in the rule table contains the
	 * return address?
	 */
	if (cie->version == 1) {
		cie->return_address_reg = __raw_readb(p);
		p++;
	} else {
		count = dwarf_read_uleb128(p, &cie->return_address_reg);
		p += count;
	}

	if (cie->augmentation[0] == 'z') {
		unsigned int length, count;
		cie->flags |= DWARF_CIE_Z_AUGMENTATION;

		/* 'z': a ULEB128 length of the augmentation data follows. */
		count = dwarf_read_uleb128(p, &length);
		p += count;

		BUG_ON((unsigned char *)p > end);

		/* Instructions start right after the augmentation data. */
		cie->initial_instructions = p + length;
		cie->augmentation++;
	}

	while (*cie->augmentation) {
		/*
		 * "L" indicates a byte showing how the
		 * LSDA pointer is encoded. Skip it.
		 */
		if (*cie->augmentation == 'L') {
			p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'R') {
			/*
			 * "R" indicates a byte showing
			 * how FDE addresses are
			 * encoded.
			 */
			cie->encoding = *(char *)p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'P') {
			/*
			 * "P" indicates a personality
			 * routine in the CIE
			 * augmentation. Not supported.
			 */
			BUG();
		} else if (*cie->augmentation == 'S') {
			/* 'S' (signal frame) is not supported either. */
			BUG();
		} else {
			/*
			 * Unknown augmentation. Assume
			 * 'z' augmentation (i.e. skip to the
			 * instructions recorded above).
			 */
			p = cie->initial_instructions;
			BUG_ON(!p);
			break;
		}
	}

	cie->initial_instructions = p;
	cie->instructions_end = end;

	/* Add to list */
	spin_lock_irqsave(&dwarf_cie_lock, flags);
	list_add_tail(&cie->link, &dwarf_cie_list);
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	return 0;
}
700 | ||
/*
 * dwarf_parse_fde - parse one Frame Description Entry out of .eh_frame
 * @entry: start of the raw FDE
 * @entry_type: the FDE's CIE-pointer field (delta back to its CIE)
 * @start: cursor positioned just past the length/ID fields
 * @len: length of the entry as read from its initial_length field
 *
 * Allocate a struct dwarf_fde, resolve its CIE, decode the pc range
 * it covers, and append it to dwarf_fde_list. Returns 0 on success
 * or -ENOMEM.
 */
static int dwarf_parse_fde(void *entry, u32 entry_type,
			   void *start, unsigned long len)
{
	struct dwarf_fde *fde;
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;
	void *p = start;

	fde = kzalloc(sizeof(*fde), GFP_KERNEL);
	if (!fde)
		return -ENOMEM;

	fde->length = len;

	/*
	 * In a .eh_frame section the CIE pointer is the
	 * delta between the address within the FDE
	 * (here: the address of the CIE-pointer field itself,
	 * i.e. 4 bytes before @p) and the CIE it refers to.
	 */
	fde->cie_pointer = (unsigned long)(p - entry_type - 4);

	/*
	 * NOTE(review): dwarf_lookup_cie() can return NULL, which would
	 * be dereferenced just below — presumably the CIE is always
	 * parsed before its FDEs; confirm against the section layout.
	 */
	cie = dwarf_lookup_cie(fde->cie_pointer);
	fde->cie = cie;

	/* Decode the pc at which the FDE's coverage starts. */
	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->initial_location,
						 cie->encoding);
	else
		count = dwarf_read_addr(p, &fde->initial_location);

	p += count;

	/* ... and the number of bytes of code it covers. */
	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->address_range,
						 cie->encoding & 0x0f);
	else
		count = dwarf_read_addr(p, &fde->address_range);

	p += count;

	/* 'z' augmentation: skip the FDE augmentation data. */
	if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
		unsigned int length;
		count = dwarf_read_uleb128(p, &length);
		p += count + length;
	}

	/* Call frame instructions. */
	fde->instructions = p;
	fde->end = start + len;

	/* Add to list. */
	spin_lock_irqsave(&dwarf_fde_lock, flags);
	list_add_tail(&fde->link, &dwarf_fde_list);
	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return 0;
}
758 | ||
759 | static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs, | |
760 | unsigned long *sp, | |
761 | const struct stacktrace_ops *ops, void *data) | |
762 | { | |
763 | struct dwarf_frame *frame; | |
764 | ||
765 | frame = dwarf_unwind_stack(0, NULL); | |
766 | ||
767 | while (frame && frame->return_addr) { | |
768 | ops->address(data, frame->return_addr, 1); | |
769 | frame = frame->next; | |
770 | } | |
771 | } | |
772 | ||
/*
 * Registration record for the unwinder core. The rating presumably
 * ranks this unwinder against others — confirm semantics in
 * asm/unwinder.h.
 */
static struct unwinder dwarf_unwinder = {
	.name = "dwarf-unwinder",
	.dump = dwarf_unwinder_dump,
	.rating = 150,
};
778 | ||
779 | static void dwarf_unwinder_cleanup(void) | |
780 | { | |
781 | struct dwarf_cie *cie, *m; | |
782 | struct dwarf_fde *fde, *n; | |
783 | unsigned long flags; | |
784 | ||
785 | /* | |
786 | * Deallocate all the memory allocated for the DWARF unwinder. | |
787 | * Traverse all the FDE/CIE lists and remove and free all the | |
788 | * memory associated with those data structures. | |
789 | */ | |
790 | spin_lock_irqsave(&dwarf_cie_lock, flags); | |
791 | list_for_each_entry_safe(cie, m, &dwarf_cie_list, link) | |
792 | kfree(cie); | |
793 | spin_unlock_irqrestore(&dwarf_cie_lock, flags); | |
794 | ||
795 | spin_lock_irqsave(&dwarf_fde_lock, flags); | |
796 | list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) | |
797 | kfree(fde); | |
798 | spin_unlock_irqrestore(&dwarf_fde_lock, flags); | |
799 | } | |
800 | ||
801 | /** | |
802 | * dwarf_unwinder_init - initialise the dwarf unwinder | |
803 | * | |
804 | * Build the data structures describing the .dwarf_frame section to | |
805 | * make it easier to lookup CIE and FDE entries. Because the | |
806 | * .eh_frame section is packed as tightly as possible it is not | |
807 | * easy to lookup the FDE for a given PC, so we build a list of FDE | |
808 | * and CIE entries that make it easier. | |
809 | */ | |
810 | void dwarf_unwinder_init(void) | |
811 | { | |
812 | u32 entry_type; | |
813 | void *p, *entry; | |
814 | int count, err; | |
815 | unsigned long len; | |
816 | unsigned int c_entries, f_entries; | |
817 | unsigned char *end; | |
818 | INIT_LIST_HEAD(&dwarf_cie_list); | |
819 | INIT_LIST_HEAD(&dwarf_fde_list); | |
820 | ||
821 | c_entries = 0; | |
822 | f_entries = 0; | |
823 | entry = &__start_eh_frame; | |
824 | ||
825 | while ((char *)entry < __stop_eh_frame) { | |
826 | p = entry; | |
827 | ||
828 | count = dwarf_entry_len(p, &len); | |
829 | if (count == 0) { | |
830 | /* | |
831 | * We read a bogus length field value. There is | |
832 | * nothing we can do here apart from disabling | |
833 | * the DWARF unwinder. We can't even skip this | |
834 | * entry and move to the next one because 'len' | |
835 | * tells us where our next entry is. | |
836 | */ | |
837 | goto out; | |
838 | } else | |
839 | p += count; | |
840 | ||
841 | /* initial length does not include itself */ | |
842 | end = p + len; | |
843 | ||
844 | entry_type = __get_unaligned_cpu32(p); | |
845 | p += 4; | |
846 | ||
847 | if (entry_type == DW_EH_FRAME_CIE) { | |
848 | err = dwarf_parse_cie(entry, p, len, end); | |
849 | if (err < 0) | |
850 | goto out; | |
851 | else | |
852 | c_entries++; | |
853 | } else { | |
854 | err = dwarf_parse_fde(entry, entry_type, p, len); | |
855 | if (err < 0) | |
856 | goto out; | |
857 | else | |
858 | f_entries++; | |
859 | } | |
860 | ||
861 | entry = (char *)entry + len + 4; | |
862 | } | |
863 | ||
864 | printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n", | |
865 | c_entries, f_entries); | |
866 | ||
867 | err = unwinder_register(&dwarf_unwinder); | |
868 | if (err) | |
869 | goto out; | |
870 | ||
871 | return; | |
872 | ||
873 | out: | |
874 | printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err); | |
875 | dwarf_unwinder_cleanup(); | |
876 | } |