Commit | Line | Data |
---|---|---|
82da3ff8 IM |
1 | /* |
2 | * This program is free software; you can redistribute it and/or modify it | |
3 | * under the terms of the GNU General Public License as published by the | |
4 | * Free Software Foundation; either version 2, or (at your option) any | |
5 | * later version. | |
6 | * | |
7 | * This program is distributed in the hope that it will be useful, but | |
8 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
10 | * General Public License for more details. | |
11 | * | |
12 | */ | |
13 | ||
14 | /* | |
15 | * Copyright (C) 2004 Amit S. Kale <amitkale@linsyssoft.com> | |
16 | * Copyright (C) 2000-2001 VERITAS Software Corporation. | |
17 | * Copyright (C) 2002 Andi Kleen, SuSE Labs | |
18 | * Copyright (C) 2004 LinSysSoft Technologies Pvt. Ltd. | |
19 | * Copyright (C) 2007 MontaVista Software, Inc. | |
20 | * Copyright (C) 2007-2008 Jason Wessel, Wind River Systems, Inc. | |
21 | */ | |
22 | /**************************************************************************** | |
23 | * Contributor: Lake Stevens Instrument Division$ | |
24 | * Written by: Glenn Engel $ | |
25 | * Updated by: Amit Kale<akale@veritas.com> | |
26 | * Updated by: Tom Rini <trini@kernel.crashing.org> | |
27 | * Updated by: Jason Wessel <jason.wessel@windriver.com> | |
28 | * Modified for 386 by Jim Kingdon, Cygnus Support. | |
 * Original kgdb, compatibility with 2.1.xx kernel by
30 | * David Grothe <dave@gcom.com> | |
31 | * Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com> | |
32 | * X86_64 changes from Andi Kleen's patch merged by Jim Houston | |
33 | */ | |
34 | #include <linux/spinlock.h> | |
35 | #include <linux/kdebug.h> | |
36 | #include <linux/string.h> | |
37 | #include <linux/kernel.h> | |
38 | #include <linux/ptrace.h> | |
39 | #include <linux/sched.h> | |
40 | #include <linux/delay.h> | |
41 | #include <linux/kgdb.h> | |
42 | #include <linux/init.h> | |
43 | #include <linux/smp.h> | |
d3597524 | 44 | #include <linux/nmi.h> |
cc096749 | 45 | #include <linux/hw_breakpoint.h> |
3751d3e8 JW |
46 | #include <linux/uaccess.h> |
47 | #include <linux/memory.h> | |
82da3ff8 | 48 | |
62edab90 | 49 | #include <asm/debugreg.h> |
82da3ff8 | 50 | #include <asm/apicdef.h> |
7b6aa335 | 51 | #include <asm/apic.h> |
166d7514 | 52 | #include <asm/nmi.h> |
82da3ff8 | 53 | |
12bfa3de | 54 | struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = |
82da3ff8 | 55 | { |
12bfa3de JW |
56 | #ifdef CONFIG_X86_32 |
57 | { "ax", 4, offsetof(struct pt_regs, ax) }, | |
58 | { "cx", 4, offsetof(struct pt_regs, cx) }, | |
59 | { "dx", 4, offsetof(struct pt_regs, dx) }, | |
60 | { "bx", 4, offsetof(struct pt_regs, bx) }, | |
61 | { "sp", 4, offsetof(struct pt_regs, sp) }, | |
62 | { "bp", 4, offsetof(struct pt_regs, bp) }, | |
63 | { "si", 4, offsetof(struct pt_regs, si) }, | |
64 | { "di", 4, offsetof(struct pt_regs, di) }, | |
65 | { "ip", 4, offsetof(struct pt_regs, ip) }, | |
66 | { "flags", 4, offsetof(struct pt_regs, flags) }, | |
67 | { "cs", 4, offsetof(struct pt_regs, cs) }, | |
68 | { "ss", 4, offsetof(struct pt_regs, ss) }, | |
69 | { "ds", 4, offsetof(struct pt_regs, ds) }, | |
70 | { "es", 4, offsetof(struct pt_regs, es) }, | |
12bfa3de JW |
71 | #else |
72 | { "ax", 8, offsetof(struct pt_regs, ax) }, | |
73 | { "bx", 8, offsetof(struct pt_regs, bx) }, | |
74 | { "cx", 8, offsetof(struct pt_regs, cx) }, | |
75 | { "dx", 8, offsetof(struct pt_regs, dx) }, | |
76 | { "si", 8, offsetof(struct pt_regs, dx) }, | |
77 | { "di", 8, offsetof(struct pt_regs, di) }, | |
78 | { "bp", 8, offsetof(struct pt_regs, bp) }, | |
79 | { "sp", 8, offsetof(struct pt_regs, sp) }, | |
80 | { "r8", 8, offsetof(struct pt_regs, r8) }, | |
81 | { "r9", 8, offsetof(struct pt_regs, r9) }, | |
82 | { "r10", 8, offsetof(struct pt_regs, r10) }, | |
83 | { "r11", 8, offsetof(struct pt_regs, r11) }, | |
84 | { "r12", 8, offsetof(struct pt_regs, r12) }, | |
85 | { "r13", 8, offsetof(struct pt_regs, r13) }, | |
86 | { "r14", 8, offsetof(struct pt_regs, r14) }, | |
87 | { "r15", 8, offsetof(struct pt_regs, r15) }, | |
88 | { "ip", 8, offsetof(struct pt_regs, ip) }, | |
89 | { "flags", 4, offsetof(struct pt_regs, flags) }, | |
90 | { "cs", 4, offsetof(struct pt_regs, cs) }, | |
91 | { "ss", 4, offsetof(struct pt_regs, ss) }, | |
639077fb JK |
92 | { "ds", 4, -1 }, |
93 | { "es", 4, -1 }, | |
703a1edc | 94 | #endif |
639077fb JK |
95 | { "fs", 4, -1 }, |
96 | { "gs", 4, -1 }, | |
12bfa3de JW |
97 | }; |
98 | ||
99 | int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) | |
100 | { | |
101 | if ( | |
82da3ff8 | 102 | #ifdef CONFIG_X86_32 |
12bfa3de JW |
103 | regno == GDB_SS || regno == GDB_FS || regno == GDB_GS || |
104 | #endif | |
105 | regno == GDB_SP || regno == GDB_ORIG_AX) | |
106 | return 0; | |
107 | ||
108 | if (dbg_reg_def[regno].offset != -1) | |
109 | memcpy((void *)regs + dbg_reg_def[regno].offset, mem, | |
110 | dbg_reg_def[regno].size); | |
111 | return 0; | |
112 | } | |
113 | ||
/*
 * dbg_get_reg - Read one register for gdb.
 * @regno: GDB register number to fetch.
 * @mem: Destination buffer for the register contents.
 * @regs: Saved register state of the stopped task.
 *
 * Returns the register's name on success, or NULL for an out-of-range
 * regno.
 */
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	/* orig_ax lives outside dbg_reg_def[]; handle it specially. */
	if (regno == GDB_ORIG_AX) {
		memcpy(mem, &regs->orig_ax, sizeof(regs->orig_ax));
		return "orig_ax";
	}
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return NULL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
		       dbg_reg_def[regno].size);

#ifdef CONFIG_X86_32
	switch (regno) {
	case GDB_SS:
		/* ss is not saved for kernel-mode traps; report __KERNEL_DS. */
		if (!user_mode_vm(regs))
			*(unsigned long *)mem = __KERNEL_DS;
		break;
	case GDB_SP:
		/* Likewise sp must be derived from the trap frame itself. */
		if (!user_mode_vm(regs))
			*(unsigned long *)mem = kernel_stack_pointer(regs);
		break;
	case GDB_GS:
	case GDB_FS:
		/* fs/gs are not saved; report a benign dummy selector. */
		*(unsigned long *)mem = 0xFFFF;
		break;
	}
#endif
	return dbg_reg_def[regno].name;
}
145 | ||
/**
 * sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs
 * @gdb_regs: A pointer to hold the registers in the order GDB wants.
 * @p: The &struct task_struct of the desired process.
 *
 * Convert the register values of the sleeping process in @p to
 * the format that GDB expects.
 * This function is called when kgdb does not have access to the
 * &struct pt_regs and therefore it should fill the gdb registers
 * @gdb_regs with what has been saved in &struct thread_struct
 * thread field during switch_to.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
#ifndef CONFIG_X86_32
	/* On x86_64 the segment/flags slots are packed as 32-bit values. */
	u32 *gdb_regs32 = (u32 *)gdb_regs;
#endif
	/* GPRs are not preserved across switch_to; report them as zero. */
	gdb_regs[GDB_AX] = 0;
	gdb_regs[GDB_BX] = 0;
	gdb_regs[GDB_CX] = 0;
	gdb_regs[GDB_DX] = 0;
	gdb_regs[GDB_SI] = 0;
	gdb_regs[GDB_DI] = 0;
	/* Saved frame pointer sits at the top of the switched-out stack. */
	gdb_regs[GDB_BP] = *(unsigned long *)p->thread.sp;
#ifdef CONFIG_X86_32
	gdb_regs[GDB_DS] = __KERNEL_DS;
	gdb_regs[GDB_ES] = __KERNEL_DS;
	gdb_regs[GDB_PS] = 0;
	gdb_regs[GDB_CS] = __KERNEL_CS;
	gdb_regs[GDB_PC] = p->thread.ip;
	gdb_regs[GDB_SS] = __KERNEL_DS;
	gdb_regs[GDB_FS] = 0xFFFF;
	gdb_regs[GDB_GS] = 0xFFFF;
#else
	/* Saved flags live just above the frame pointer on the stack. */
	gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8);
	gdb_regs32[GDB_CS] = __KERNEL_CS;
	gdb_regs32[GDB_SS] = __KERNEL_DS;
	gdb_regs[GDB_PC] = 0;
	gdb_regs[GDB_R8] = 0;
	gdb_regs[GDB_R9] = 0;
	gdb_regs[GDB_R10] = 0;
	gdb_regs[GDB_R11] = 0;
	gdb_regs[GDB_R12] = 0;
	gdb_regs[GDB_R13] = 0;
	gdb_regs[GDB_R14] = 0;
	gdb_regs[GDB_R15] = 0;
#endif
	gdb_regs[GDB_SP] = p->thread.sp;
}
195 | ||
64e9ee30 JW |
/*
 * Bookkeeping for the HBP_NUM hardware breakpoints/watchpoints kgdb
 * manages on behalf of the debugger.
 */
static struct hw_breakpoint {
	unsigned enabled;		/* slot currently in use */
	unsigned long addr;		/* linear address being watched */
	int len;			/* X86_BREAKPOINT_LEN_* encoding */
	int type;			/* X86_BREAKPOINT_* access type */
	struct perf_event * __percpu *pev;	/* backing per-cpu perf events */
} breakinfo[HBP_NUM];

/* Shadow of DR7, used while debugging before perf/hw_breakpoint is up. */
static unsigned long early_dr7;
205 | ||
64e9ee30 JW |
/*
 * kgdb_correct_hw_break - (Re)arm the active hardware breakpoints.
 *
 * Walk breakinfo[] and install every enabled breakpoint on the current
 * CPU: directly via the debug registers during early debugging, or
 * through the perf hw_breakpoint layer otherwise.
 */
static void kgdb_correct_hw_break(void)
{
	int breakno;

	for (breakno = 0; breakno < HBP_NUM; breakno++) {
		struct perf_event *bp;
		struct arch_hw_breakpoint *info;
		int val;
		int cpu = raw_smp_processor_id();
		if (!breakinfo[breakno].enabled)
			continue;
		if (dbg_is_early) {
			/* Program DR0-3 and DR7 by hand before perf is up. */
			set_debugreg(breakinfo[breakno].addr, breakno);
			early_dr7 |= encode_dr7(breakno,
						breakinfo[breakno].len,
						breakinfo[breakno].type);
			set_debugreg(early_dr7, 7);
			continue;
		}
		bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
		info = counter_arch_bp(bp);
		/* Only (re)install events that are currently disarmed. */
		if (bp->attr.disabled != 1)
			continue;
		bp->attr.bp_addr = breakinfo[breakno].addr;
		bp->attr.bp_len = breakinfo[breakno].len;
		bp->attr.bp_type = breakinfo[breakno].type;
		info->address = breakinfo[breakno].addr;
		info->len = breakinfo[breakno].len;
		info->type = breakinfo[breakno].type;
		val = arch_install_hw_breakpoint(bp);
		if (!val)
			bp->attr.disabled = 0;
	}
	if (!dbg_is_early)
		hw_breakpoint_restore();
}
242 | ||
5352ae63 JW |
243 | static int hw_break_reserve_slot(int breakno) |
244 | { | |
245 | int cpu; | |
246 | int cnt = 0; | |
247 | struct perf_event **pevent; | |
248 | ||
031acd8c JW |
249 | if (dbg_is_early) |
250 | return 0; | |
251 | ||
5352ae63 JW |
252 | for_each_online_cpu(cpu) { |
253 | cnt++; | |
254 | pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); | |
255 | if (dbg_reserve_bp_slot(*pevent)) | |
256 | goto fail; | |
257 | } | |
258 | ||
259 | return 0; | |
260 | ||
261 | fail: | |
262 | for_each_online_cpu(cpu) { | |
263 | cnt--; | |
264 | if (!cnt) | |
265 | break; | |
266 | pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); | |
267 | dbg_release_bp_slot(*pevent); | |
268 | } | |
269 | return -1; | |
270 | } | |
271 | ||
/*
 * hw_break_release_slot - Release one breakpoint's bp slots on all CPUs.
 * @breakno: Index into breakinfo[].
 *
 * Returns 0 on success, -1 if any per-cpu release failed.
 */
static int hw_break_release_slot(int breakno)
{
	struct perf_event **pevent;
	int cpu;

	/* Nothing was reserved during early debugging. */
	if (dbg_is_early)
		return 0;

	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		if (dbg_release_bp_slot(*pevent))
			/*
			 * The debugger is responsible for handling the retry on
			 * remove failure.
			 */
			return -1;
	}
	return 0;
}
291 | ||
64e9ee30 JW |
292 | static int |
293 | kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) | |
294 | { | |
295 | int i; | |
296 | ||
df493935 | 297 | for (i = 0; i < HBP_NUM; i++) |
64e9ee30 JW |
298 | if (breakinfo[i].addr == addr && breakinfo[i].enabled) |
299 | break; | |
df493935 | 300 | if (i == HBP_NUM) |
64e9ee30 JW |
301 | return -1; |
302 | ||
5352ae63 JW |
303 | if (hw_break_release_slot(i)) { |
304 | printk(KERN_ERR "Cannot remove hw breakpoint at %lx\n", addr); | |
305 | return -1; | |
306 | } | |
64e9ee30 JW |
307 | breakinfo[i].enabled = 0; |
308 | ||
309 | return 0; | |
310 | } | |
311 | ||
/*
 * kgdb_remove_all_hw_break - Disarm and/or release every HW breakpoint.
 *
 * Armed breakpoints are only uninstalled on this CPU (and keep their
 * breakinfo slot enabled); already-disarmed ones have their slots fully
 * released.
 */
static void kgdb_remove_all_hw_break(void)
{
	int i;
	int cpu = raw_smp_processor_id();
	struct perf_event *bp;

	for (i = 0; i < HBP_NUM; i++) {
		if (!breakinfo[i].enabled)
			continue;
		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (!bp->attr.disabled) {
			/* Still armed: disarm on this CPU, keep the slot. */
			arch_uninstall_hw_breakpoint(bp);
			bp->attr.disabled = 1;
			continue;
		}
		if (dbg_is_early)
			/* Early debug: clear its bits from the DR7 shadow. */
			early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
						 breakinfo[i].type);
		else if (hw_break_release_slot(i))
			printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n",
			       breakinfo[i].addr);
		breakinfo[i].enabled = 0;
	}
}
336 | ||
/*
 * kgdb_set_hw_break - Claim a free slot and describe a new HW breakpoint.
 * @addr: Address to break or watch on.
 * @len: Watched length in bytes (forced to 1 for execute breakpoints).
 * @bptype: kgdb breakpoint type (execute / write / read-write).
 *
 * Returns 0 on success; -1 when no slot is free, the type or length is
 * unsupported, or the perf slot reservation fails.
 */
static int
kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
	int i;

	/* Find the first unused slot. */
	for (i = 0; i < HBP_NUM; i++)
		if (!breakinfo[i].enabled)
			break;
	if (i == HBP_NUM)
		return -1;

	switch (bptype) {
	case BP_HARDWARE_BREAKPOINT:
		/* Execute breakpoints are always 1 byte on x86. */
		len = 1;
		breakinfo[i].type = X86_BREAKPOINT_EXECUTE;
		break;
	case BP_WRITE_WATCHPOINT:
		breakinfo[i].type = X86_BREAKPOINT_WRITE;
		break;
	case BP_ACCESS_WATCHPOINT:
		breakinfo[i].type = X86_BREAKPOINT_RW;
		break;
	default:
		return -1;
	}
	/* Translate the byte count into the hardware length encoding. */
	switch (len) {
	case 1:
		breakinfo[i].len = X86_BREAKPOINT_LEN_1;
		break;
	case 2:
		breakinfo[i].len = X86_BREAKPOINT_LEN_2;
		break;
	case 4:
		breakinfo[i].len = X86_BREAKPOINT_LEN_4;
		break;
#ifdef CONFIG_X86_64
	case 8:
		breakinfo[i].len = X86_BREAKPOINT_LEN_8;
		break;
#endif
	default:
		return -1;
	}
	breakinfo[i].addr = addr;
	/* Reserve a perf bp slot on every online CPU before enabling. */
	if (hw_break_reserve_slot(i)) {
		breakinfo[i].addr = 0;
		return -1;
	}
	breakinfo[i].enabled = 1;

	return 0;
}
389 | ||
/**
 * kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb.
 * @regs: Current &struct pt_regs.
 *
 * This function will be called if the particular architecture must
 * disable hardware debugging while it is processing gdb packets or
 * handling exception.
 */
static void kgdb_disable_hw_debug(struct pt_regs *regs)
{
	int i;
	int cpu = raw_smp_processor_id();
	struct perf_event *bp;

	/* Disable hardware debugging while we are in kgdb: */
	set_debugreg(0UL, 7);
	for (i = 0; i < HBP_NUM; i++) {
		if (!breakinfo[i].enabled)
			continue;
		if (dbg_is_early) {
			/* Early debug: clear the bits in the DR7 shadow. */
			early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
						 breakinfo[i].type);
			continue;
		}
		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (bp->attr.disabled == 1)
			continue;
		/* Disarm the perf-backed breakpoint on this CPU. */
		arch_uninstall_hw_breakpoint(bp);
		bp->attr.disabled = 1;
	}
}
421 | ||
82da3ff8 IM |
#ifdef CONFIG_SMP
/**
 * kgdb_roundup_cpus - Get other CPUs into a holding pattern
 * @flags: Current IRQ state
 *
 * On SMP systems, we need to get the attention of the other CPUs
 * and get them be in a known state. This should do what is needed
 * to get the other CPUs to call kgdb_wait(). Note that on some arches,
 * the NMI approach is not used for rounding up all the CPUs. For example,
 * in case of MIPS, smp_call_function() is used to roundup CPUs. In
 * this case, we have to make sure that interrupts are enabled before
 * calling smp_call_function(). The argument to this function is
 * the flags that will be used when restoring the interrupts. There is
 * local_irq_save() call before kgdb_roundup_cpus().
 *
 * On non-SMP systems, this is not called.
 */
void kgdb_roundup_cpus(unsigned long flags)
{
	/* NMI IPI so CPUs running with interrupts disabled still stop. */
	apic->send_IPI_allbutself(APIC_DM_NMI);
}
#endif
444 | ||
/**
 * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
 * @e_vector: The error vector of the exception that happened.
 * @signo: The signal number of the exception that happened.
 * @err_code: The error code of the exception that happened.
 * @remcomInBuffer: The buffer of the packet we have read.
 * @remcomOutBuffer: The buffer of %BUFMAX bytes to write a packet into.
 * @linux_regs: The &struct pt_regs of the current process.
 *
 * This function MUST handle the 'c' and 's' command packets,
 * as well packets to set / remove a hardware breakpoint, if used.
 * If there are additional packets which the hardware needs to handle,
 * they are handled here. The code should return -1 if it wants to
 * process more packets, and a %0 or %1 if it wants to exit from the
 * kgdb callback.
 */
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			       char *remcomInBuffer, char *remcomOutBuffer,
			       struct pt_regs *linux_regs)
{
	unsigned long addr;
	char *ptr;

	switch (remcomInBuffer[0]) {
	case 'c':
	case 's':
		/* try to read optional parameter, pc unchanged if no parm */
		ptr = &remcomInBuffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->ip = addr;
		/* fall through - continue/step share the detach cleanup */
	case 'D':
	case 'k':
		/* clear the trace bit */
		linux_regs->flags &= ~X86_EFLAGS_TF;
		atomic_set(&kgdb_cpu_doing_single_step, -1);

		/* set the trace bit if we're stepping */
		if (remcomInBuffer[0] == 's') {
			linux_regs->flags |= X86_EFLAGS_TF;
			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}

		return 0;
	}

	/* this means that we do not want to exit from the handler: */
	return -1;
}
494 | ||
495 | static inline int | |
496 | single_step_cont(struct pt_regs *regs, struct die_args *args) | |
497 | { | |
498 | /* | |
499 | * Single step exception from kernel space to user space so | |
500 | * eat the exception and continue the process: | |
501 | */ | |
502 | printk(KERN_ERR "KGDB: trap/step from kernel to user space, " | |
503 | "resuming...\n"); | |
504 | kgdb_arch_handle_exception(args->trapnr, args->signr, | |
505 | args->err, "c", "", regs); | |
62edab90 P |
506 | /* |
507 | * Reset the BS bit in dr6 (pointed by args->err) to | |
508 | * denote completion of processing | |
509 | */ | |
510 | (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP; | |
82da3ff8 IM |
511 | |
512 | return NOTIFY_STOP; | |
513 | } | |
514 | ||
d3597524 JW |
/* Per-CPU flag: set when this CPU was rounded up via the kgdb NMI. */
static int was_in_debug_nmi[NR_CPUS];

/*
 * kgdb_nmi_handler - NMI entry point used for the CPU roundup.
 * @cmd: NMI type (NMI_LOCAL or NMI_UNKNOWN).
 * @regs: Register state at the NMI.
 *
 * Returns NMI_HANDLED when the NMI was kgdb's roundup IPI, or the
 * stray "unknown" NMI that can follow it; NMI_DONE otherwise.
 */
static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	switch (cmd) {
	case NMI_LOCAL:
		if (atomic_read(&kgdb_active) != -1) {
			/* KGDB CPU roundup */
			kgdb_nmicallback(raw_smp_processor_id(), regs);
			was_in_debug_nmi[raw_smp_processor_id()] = 1;
			touch_nmi_watchdog();
			return NMI_HANDLED;
		}
		break;

	case NMI_UNKNOWN:
		/* Swallow the leftover NMI from a previous roundup. */
		if (was_in_debug_nmi[raw_smp_processor_id()]) {
			was_in_debug_nmi[raw_smp_processor_id()] = 0;
			return NMI_HANDLED;
		}
		break;
	default:
		/* do nothing */
		break;
	}
	return NMI_DONE;
}
542 | ||
/*
 * __kgdb_notify - Common die-notifier body; must run with IRQs off.
 * @args: Notifier payload (regs, trap number, signal, error code).
 * @cmd: The die event (e.g. DIE_DEBUG).
 *
 * Returns NOTIFY_STOP when kgdb consumed the event, NOTIFY_DONE when
 * it should be passed on (user-mode traps, ignored single steps).
 */
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
	struct pt_regs *regs = args->regs;

	switch (cmd) {
	case DIE_DEBUG:
		if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
			/* A kgdb-requested step that landed in user space
			 * is eaten and the process resumed. */
			if (user_mode(regs))
				return single_step_cont(regs, args);
			break;
		} else if (test_thread_flag(TIF_SINGLESTEP))
			/* This means a user thread is single stepping
			 * a system call which should be ignored
			 */
			return NOTIFY_DONE;
		/* fall through */
	default:
		if (user_mode(regs))
			return NOTIFY_DONE;
	}

	if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
		return NOTIFY_DONE;

	/* Must touch watchdog before return to normal operation */
	touch_nmi_watchdog();
	return NOTIFY_STOP;
}
571 | ||
f503b5ae JW |
572 | int kgdb_ll_trap(int cmd, const char *str, |
573 | struct pt_regs *regs, long err, int trap, int sig) | |
574 | { | |
575 | struct die_args args = { | |
576 | .regs = regs, | |
577 | .str = str, | |
578 | .err = err, | |
579 | .trapnr = trap, | |
580 | .signr = sig, | |
581 | ||
582 | }; | |
583 | ||
584 | if (!kgdb_io_module_registered) | |
585 | return NOTIFY_DONE; | |
586 | ||
587 | return __kgdb_notify(&args, cmd); | |
588 | } | |
f503b5ae | 589 | |
82da3ff8 IM |
/*
 * kgdb_notify - Die-notifier callback; wraps __kgdb_notify() with
 * interrupts disabled, as the core handler requires.
 */
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	unsigned long irq_flags;
	int rc;

	local_irq_save(irq_flags);
	rc = __kgdb_notify(ptr, cmd);
	local_irq_restore(irq_flags);

	return rc;
}
602 | ||
/* Routes kernel die events (traps, faults) into the kgdb core. */
static struct notifier_block kgdb_notifier = {
	.notifier_call	= kgdb_notify,
};
606 | ||
/**
 * kgdb_arch_init - Perform any architecture specific initialization.
 *
 * This function will handle the initialization of any architecture
 * specific callbacks.
 */
int kgdb_arch_init(void)
{
	int retval;

	retval = register_die_notifier(&kgdb_notifier);
	if (retval)
		goto out;

	/* Needed to receive the CPU-roundup IPI, which is sent as an NMI. */
	retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler,
					0, "kgdb");
	if (retval)
		goto out1;

	/* Catches the stray NMI a roundup can leave behind. */
	retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler,
					0, "kgdb");

	if (retval)
		goto out2;

	return retval;

	/* Unwind the registrations in reverse order on failure. */
out2:
	unregister_nmi_handler(NMI_LOCAL, "kgdb");
out1:
	unregister_die_notifier(&kgdb_notifier);
out:
	return retval;
}
641 | ||
a8b0ca17 | 642 | static void kgdb_hw_overflow_handler(struct perf_event *event, |
ba773f7c JW |
643 | struct perf_sample_data *data, struct pt_regs *regs) |
644 | { | |
fad99fac JW |
645 | struct task_struct *tsk = current; |
646 | int i; | |
647 | ||
648 | for (i = 0; i < 4; i++) | |
649 | if (breakinfo[i].enabled) | |
650 | tsk->thread.debugreg6 |= (DR_TRAP0 << i); | |
ba773f7c JW |
651 | } |
652 | ||
/*
 * kgdb_arch_late - Late (sleepable) part of the arch initialization.
 *
 * Allocates the per-cpu perf events that back the hardware breakpoints.
 */
void kgdb_arch_late(void)
{
	int i, cpu;
	struct perf_event_attr attr;
	struct perf_event **pevent;

	/*
	 * Pre-allocate the hw breakpoint structures in the non-atomic
	 * portion of kgdb because this operation requires mutexes to
	 * complete.
	 */
	hw_breakpoint_init(&attr);
	/* Placeholder target; real addr/len/type are set when armed. */
	attr.bp_addr = (unsigned long)kgdb_arch_init;
	attr.bp_len = HW_BREAKPOINT_LEN_1;
	attr.bp_type = HW_BREAKPOINT_W;
	attr.disabled = 1;
	for (i = 0; i < HBP_NUM; i++) {
		/* Skip slots already allocated by a previous call. */
		if (breakinfo[i].pev)
			continue;
		breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
		if (IS_ERR((void * __force)breakinfo[i].pev)) {
			printk(KERN_ERR "kgdb: Could not allocate hw"
			       "breakpoints\nDisabling the kernel debugger\n");
			breakinfo[i].pev = NULL;
			kgdb_arch_exit();
			return;
		}
		for_each_online_cpu(cpu) {
			pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
			pevent[0]->hw.sample_period = 1;
			pevent[0]->overflow_handler = kgdb_hw_overflow_handler;
			if (pevent[0]->destroy != NULL) {
				/* kgdb manages the slot itself from here on. */
				pevent[0]->destroy = NULL;
				release_bp_slot(*pevent);
			}
		}
	}
}
691 | ||
692 | /** | |
693 | * kgdb_arch_exit - Perform any architecture specific uninitalization. | |
694 | * | |
695 | * This function will handle the uninitalization of any architecture | |
696 | * specific callbacks, for dynamic registration and unregistration. | |
697 | */ | |
698 | void kgdb_arch_exit(void) | |
699 | { | |
cc096749 JW |
700 | int i; |
701 | for (i = 0; i < 4; i++) { | |
702 | if (breakinfo[i].pev) { | |
703 | unregister_wide_hw_breakpoint(breakinfo[i].pev); | |
704 | breakinfo[i].pev = NULL; | |
705 | } | |
706 | } | |
9c48f1c6 DZ |
707 | unregister_nmi_handler(NMI_UNKNOWN, "kgdb"); |
708 | unregister_nmi_handler(NMI_LOCAL, "kgdb"); | |
82da3ff8 IM |
709 | unregister_die_notifier(&kgdb_notifier); |
710 | } | |
711 | ||
712 | /** | |
713 | * | |
714 | * kgdb_skipexception - Bail out of KGDB when we've been triggered. | |
715 | * @exception: Exception vector number | |
716 | * @regs: Current &struct pt_regs. | |
717 | * | |
718 | * On some architectures we need to skip a breakpoint exception when | |
719 | * it occurs after a breakpoint has been removed. | |
720 | * | |
721 | * Skip an int3 exception when it occurs after a breakpoint has been | |
722 | * removed. Backtrack eip by 1 since the int3 would have caused it to | |
723 | * increment by 1. | |
724 | */ | |
725 | int kgdb_skipexception(int exception, struct pt_regs *regs) | |
726 | { | |
727 | if (exception == 3 && kgdb_isremovedbreak(regs->ip - 1)) { | |
728 | regs->ip -= 1; | |
729 | return 1; | |
730 | } | |
731 | return 0; | |
732 | } | |
733 | ||
/*
 * kgdb_arch_pc - Report the PC of the trap to the kgdb core.
 *
 * An int3 (vector 3) reports ip one past the 1-byte breakpoint, so
 * back it up by one; all other exceptions report ip as-is.
 */
unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	return (exception == 3) ? pc - 1 : pc;
}
740 | ||
dcc78711 JW |
/* Set the instruction pointer to the resume address chosen by gdb. */
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}
745 | ||
3751d3e8 JW |
/*
 * kgdb_arch_set_breakpoint - Install a software (int3) breakpoint.
 * @bpt: Breakpoint descriptor; saved_instr receives the original bytes.
 *
 * Tries a plain probe_kernel_write() first.  If the text is
 * write-protected (CONFIG_DEBUG_RODATA), falls back to text_poke(),
 * which is safe here because all other CPUs are stopped, provided
 * text_mutex is not held.  Returns 0 on success or a negative errno.
 */
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;
	char opc[BREAK_INSTR_SIZE];

	bpt->type = BP_BREAKPOINT;
	/* Save the original instruction bytes for later restore. */
	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;
	err = probe_kernel_write((char *)bpt->bpt_addr,
				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
#ifdef CONFIG_DEBUG_RODATA
	if (!err)
		return err;
	/*
	 * It is safe to call text_poke() because normal kernel execution
	 * is stopped on all cores, so long as the text_mutex is not locked.
	 */
	if (mutex_is_locked(&text_mutex))
		return -EBUSY;
	text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
		  BREAK_INSTR_SIZE);
	/* Read back and verify the poke actually took effect. */
	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
	if (err)
		return err;
	if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
		return -EINVAL;
	/* Remember this one must be removed via text_poke() too. */
	bpt->type = BP_POKE_BREAKPOINT;
#endif /* CONFIG_DEBUG_RODATA */
	return err;
}
778 | ||
/*
 * kgdb_arch_remove_breakpoint - Restore the original instruction bytes.
 * @bpt: Breakpoint descriptor holding the saved instruction.
 *
 * Mirrors kgdb_arch_set_breakpoint(): breakpoints installed via
 * text_poke() are removed the same way; otherwise (or if the poke
 * cannot be done/verified) a plain probe_kernel_write() is used.
 */
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
#ifdef CONFIG_DEBUG_RODATA
	int err;
	char opc[BREAK_INSTR_SIZE];

	if (bpt->type != BP_POKE_BREAKPOINT)
		goto knl_write;
	/*
	 * It is safe to call text_poke() because normal kernel execution
	 * is stopped on all cores, so long as the text_mutex is not locked.
	 */
	if (mutex_is_locked(&text_mutex))
		goto knl_write;
	text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
	/* Verify the restore; fall back to probe_kernel_write() if not. */
	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
	if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
		goto knl_write;
	return err;
knl_write:
#endif /* CONFIG_DEBUG_RODATA */
	return probe_kernel_write((char *)bpt->bpt_addr,
				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}
803 | ||
82da3ff8 IM |
/*
 * Architecture hooks and capability flags handed to the generic kgdb
 * core.
 */
struct kgdb_arch arch_kgdb_ops = {
	/* Breakpoint instruction: */
	.gdb_bpt_instr = { 0xcc },	/* int3 */
	.flags = KGDB_HW_BREAKPOINT,
	.set_hw_breakpoint = kgdb_set_hw_break,
	.remove_hw_breakpoint = kgdb_remove_hw_break,
	.disable_hw_break = kgdb_disable_hw_debug,
	.remove_all_hw_break = kgdb_remove_all_hw_break,
	.correct_hw_break = kgdb_correct_hw_break,
};