kernel/hw_breakpoint.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);
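
/*
 * Layout of the per-task accounting above: nr_task_bp_pinned is indexed so
 * that tsk_pinned[n] counts how many tasks currently have n + 1 pinned
 * breakpoints on this cpu.  For example, when a task registers its second
 * breakpoint on a cpu, toggle_bp_task_slot() does tsk_pinned[1]++ and
 * tsk_pinned[0]--, and max_task_bp_pinned() then reports 2 for that cpu.
 */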

/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu
 */
static unsigned int max_task_bp_pinned(int cpu)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);

	for (i = HBP_NUM - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}

static int task_bp_pinned(struct task_struct *tsk)
{
	struct perf_event_context *ctx = tsk->perf_event_ctxp;
	struct list_head *list;
	struct perf_event *bp;
	unsigned long flags;
	int count = 0;

	if (WARN_ONCE(!ctx, "No perf context for this task"))
		return 0;

	list = &ctx->event_list;

	raw_spin_lock_irqsave(&ctx->lock, flags);

	/*
	 * The current breakpoint counter is not included in the list
	 * at the open() callback time
	 */
	list_for_each_entry(bp, list, event_entry) {
		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
			count++;
	}

	raw_spin_unlock_irqrestore(&ctx->lock, flags);

	return count;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
		if (!tsk)
			slots->pinned += max_task_bp_pinned(cpu);
		else
			slots->pinned += task_bp_pinned(tsk);
		slots->flexible = per_cpu(nr_bp_flexible, cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned, cpu);
		if (!tsk)
			nr += max_task_bp_pinned(cpu);
		else
			nr += task_bp_pinned(tsk);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible, cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * Add/remove a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
	unsigned int *tsk_pinned;
	int count = 0;

	count = task_bp_pinned(tsk);

	tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
	if (enable) {
		tsk_pinned[count]++;
		if (count > 0)
			tsk_pinned[count-1]--;
	} else {
		tsk_pinned[count]--;
		if (count > 0)
			tsk_pinned[count-1]++;
	}
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter task profiling */
	if (tsk) {
		if (cpu >= 0) {
			toggle_bp_task_slot(tsk, cpu, enable);
			return;
		}

		for_each_online_cpu(cpu)
			toggle_bp_task_slot(tsk, cpu, enable);
		return;
	}

	/* Pinned counter cpu profiling */
	if (enable)
		per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
	else
		per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't use up all of the debug registers.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must
 *          keep at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
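/*
 * A worked example of the pinned check (illustrative only; assumes
 * HBP_NUM == 4, the x86 debug register count): if a cpu already carries two
 * per-cpu pinned breakpoints and the busiest task on that cpu has two pinned
 * breakpoints, then slots.pinned == 4 and __reserve_bp_slot() below returns
 * -ENOSPC.  Likewise, three pinned slots plus at least one flexible counter
 * gives 3 + 1 == HBP_NUM, so the request is also refused in order to keep
 * one register free for the flexible counters.
 */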
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};

	fetch_bp_busy_slots(&slots, bp);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) == HBP_NUM)
		return -ENOSPC;

	toggle_bp_slot(bp, true);

	return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
	toggle_bp_slot(bp, false);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp);

	return 0;
}
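
/*
 * Unlike reserve_bp_slot()/release_bp_slot(), the dbg_* helpers above never
 * sleep: if nr_bp_mutex is already held they simply fail with -1, which the
 * debugger-side caller presumably treats as "slot bookkeeping busy, try
 * again later".
 */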

static int validate_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = arch_validate_hwbkpt_settings(bp);
	if (ret)
		return ret;

	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	ret = validate_hw_breakpoint(bp);

	/* if validate_hw_breakpoint() fails then release bp slot */
	if (ret)
		release_bp_slot(bp);

	return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
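
/*
 * A minimal usage sketch for register_user_hw_breakpoint() (illustrative
 * only; "user_addr", "child" and "my_bp_handler" are placeholder names, not
 * symbols defined in this file):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = user_addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, my_bp_handler, child);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 *
 * This mirrors, roughly, how the x86 ptrace debug-register emulation drives
 * this API.
 */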

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = validate_hw_breakpoint(bp);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
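
/*
 * A minimal sketch of modifying an existing breakpoint (illustrative only;
 * "new_addr" is a placeholder): refill an attr with the desired values and
 * let modify_user_hw_breakpoint() validate and re-enable the event:
 *
 *	struct perf_event_attr attr = bp->attr;
 *	int err;
 *
 *	attr.bp_addr = new_addr;
 *	err = modify_user_hw_breakpoint(bp, &attr);
 */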

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered)
{
	struct perf_event * __percpu *cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}
	put_online_cpus();

	return cpu_events;

fail:
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	put_online_cpus();

	free_percpu(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
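
/*
 * A minimal usage sketch for register_wide_hw_breakpoint(), loosely modeled
 * on samples/hw_breakpoint/data_breakpoint.c ("ksym_addr" and "wp_handler"
 * are placeholder names):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = ksym_addr;	(e.g. from kallsyms_lookup_name())
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *	wp = register_wide_hw_breakpoint(&attr, wp_handler);
 *	if (IS_ERR((void __force *)wp))
 *		return PTR_ERR((void __force *)wp);
 *
 * The matching teardown is unregister_wide_hw_breakpoint(wp).
 */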

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);


struct pmu perf_ops_bp = {
	.enable		= arch_install_hw_breakpoint,
	.disable	= arch_uninstall_hw_breakpoint,
	.read		= hw_breakpoint_pmu_read,
};