/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
        PERF_TYPE_HARDWARE              = 0,
        PERF_TYPE_SOFTWARE              = 1,
        PERF_TYPE_TRACEPOINT            = 2,
        PERF_TYPE_HW_CACHE              = 3,
        PERF_TYPE_RAW                   = 4,
        PERF_TYPE_BREAKPOINT            = 5,

        PERF_TYPE_MAX,                  /* non-ABI */
};

/*
 * Generalized performance event types, used by the attr.config
 * parameter of the sys_perf_event_open() syscall:
 */
enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_HW_CPU_CYCLES                = 0,
        PERF_COUNT_HW_INSTRUCTIONS              = 1,
        PERF_COUNT_HW_CACHE_REFERENCES          = 2,
        PERF_COUNT_HW_CACHE_MISSES              = 3,
        PERF_COUNT_HW_BRANCH_INSTRUCTIONS       = 4,
        PERF_COUNT_HW_BRANCH_MISSES             = 5,
        PERF_COUNT_HW_BUS_CYCLES                = 6,

        PERF_COUNT_HW_MAX,                      /* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
        PERF_COUNT_HW_CACHE_L1D                 = 0,
        PERF_COUNT_HW_CACHE_L1I                 = 1,
        PERF_COUNT_HW_CACHE_LL                  = 2,
        PERF_COUNT_HW_CACHE_DTLB                = 3,
        PERF_COUNT_HW_CACHE_ITLB                = 4,
        PERF_COUNT_HW_CACHE_BPU                 = 5,

        PERF_COUNT_HW_CACHE_MAX,                /* non-ABI */
};

enum perf_hw_cache_op_id {
        PERF_COUNT_HW_CACHE_OP_READ             = 0,
        PERF_COUNT_HW_CACHE_OP_WRITE            = 1,
        PERF_COUNT_HW_CACHE_OP_PREFETCH         = 2,

        PERF_COUNT_HW_CACHE_OP_MAX,             /* non-ABI */
};

enum perf_hw_cache_op_result_id {
        PERF_COUNT_HW_CACHE_RESULT_ACCESS       = 0,
        PERF_COUNT_HW_CACHE_RESULT_MISS         = 1,

        PERF_COUNT_HW_CACHE_RESULT_MAX,         /* non-ABI */
};
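
/*
 * For PERF_TYPE_HW_CACHE events, the three ids above are packed into
 * attr.config one byte each:
 *
 *      config = id | (op_id << 8) | (result_id << 16)
 *
 * A sketch of a helper that builds such a config word (the helper
 * name is illustrative, not part of this header):
 *
 *      static inline __u64 hw_cache_config(enum perf_hw_cache_id id,
 *                      enum perf_hw_cache_op_id op,
 *                      enum perf_hw_cache_op_result_id result)
 *      {
 *              return (__u64)id | ((__u64)op << 8) | ((__u64)result << 16);
 *      }
 *
 * e.g. hw_cache_config(PERF_COUNT_HW_CACHE_L1D,
 *                      PERF_COUNT_HW_CACHE_OP_READ,
 *                      PERF_COUNT_HW_CACHE_RESULT_MISS)
 * counts L1 data-cache read misses.
 */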

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and software occurrences within the kernel (and allow them
 * to be profiled as well):
 */
enum perf_sw_ids {
        PERF_COUNT_SW_CPU_CLOCK         = 0,
        PERF_COUNT_SW_TASK_CLOCK        = 1,
        PERF_COUNT_SW_PAGE_FAULTS       = 2,
        PERF_COUNT_SW_CONTEXT_SWITCHES  = 3,
        PERF_COUNT_SW_CPU_MIGRATIONS    = 4,
        PERF_COUNT_SW_PAGE_FAULTS_MIN   = 5,
        PERF_COUNT_SW_PAGE_FAULTS_MAJ   = 6,
        PERF_COUNT_SW_ALIGNMENT_FAULTS  = 7,
        PERF_COUNT_SW_EMULATION_FAULTS  = 8,

        PERF_COUNT_SW_MAX,              /* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
        PERF_SAMPLE_IP                  = 1U << 0,
        PERF_SAMPLE_TID                 = 1U << 1,
        PERF_SAMPLE_TIME                = 1U << 2,
        PERF_SAMPLE_ADDR                = 1U << 3,
        PERF_SAMPLE_READ                = 1U << 4,
        PERF_SAMPLE_CALLCHAIN           = 1U << 5,
        PERF_SAMPLE_ID                  = 1U << 6,
        PERF_SAMPLE_CPU                 = 1U << 7,
        PERF_SAMPLE_PERIOD              = 1U << 8,
        PERF_SAMPLE_STREAM_ID           = 1U << 9,
        PERF_SAMPLE_RAW                 = 1U << 10,

        PERF_SAMPLE_MAX = 1U << 11,     /* non-ABI */
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *      { u64           value;
 *        { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64         id;           } && PERF_FORMAT_ID
 *      } && !PERF_FORMAT_GROUP
 *
 *      { u64           nr;
 *        { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64         value;
 *          { u64       id;           } && PERF_FORMAT_ID
 *        }             cntr[nr];
 *      } && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
        PERF_FORMAT_TOTAL_TIME_ENABLED  = 1U << 0,
        PERF_FORMAT_TOTAL_TIME_RUNNING  = 1U << 1,
        PERF_FORMAT_ID                  = 1U << 2,
        PERF_FORMAT_GROUP               = 1U << 3,

        PERF_FORMAT_MAX = 1U << 4,      /* non-ABI */
};
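
/*
 * A sketch of reading a non-group counter opened with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING
 * (the struct and scaling below are illustrative, not part of this
 * header). The two time fields allow scaling the value when the
 * counter was multiplexed with others on the PMU:
 *
 *      struct {
 *              __u64 value;
 *              __u64 time_enabled;
 *              __u64 time_running;
 *      } rf;
 *
 *      if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running)
 *              scaled = rf.value * rf.time_enabled / rf.time_running;
 */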

#define PERF_ATTR_SIZE_VER0     64      /* sizeof first published struct */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

        /*
         * Major type: hardware/software/tracepoint/etc.
         */
        __u32                   type;

        /*
         * Size of the attr structure, for fwd/bwd compat.
         */
        __u32                   size;

        /*
         * Type specific configuration information.
         */
        __u64                   config;

        union {
                __u64           sample_period;
                __u64           sample_freq;
        };

        __u64                   sample_type;
        __u64                   read_format;

        __u64                   disabled       :  1, /* off by default        */
                                inherit        :  1, /* children inherit it   */
                                pinned         :  1, /* must always be on PMU */
                                exclusive      :  1, /* only group on PMU     */
                                exclude_user   :  1, /* don't count user      */
                                exclude_kernel :  1, /* ditto kernel          */
                                exclude_hv     :  1, /* ditto hypervisor      */
                                exclude_idle   :  1, /* don't count when idle */
                                mmap           :  1, /* include mmap data     */
                                comm           :  1, /* include comm data     */
                                freq           :  1, /* use freq, not period  */
                                inherit_stat   :  1, /* per task counts       */
                                enable_on_exec :  1, /* next exec enables     */
                                task           :  1, /* trace fork/exit       */
                                watermark      :  1, /* wakeup_watermark      */

                                __reserved_1   : 49;

        union {
                __u32           wakeup_events;    /* wakeup every n events */
                __u32           wakeup_watermark; /* bytes before wakeup   */
        };

        __u32                   bp_type;
        __u64                   bp_addr;
        __u64                   bp_len;
};
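
/*
 * A minimal user-space sketch of opening a counter described by this
 * struct. glibc has no wrapper for the syscall, so it is invoked via
 * syscall(); error handling omitted:
 *
 *      #include <linux/perf_event.h>
 *      #include <sys/syscall.h>
 *      #include <string.h>
 *      #include <unistd.h>
 *
 *      struct perf_event_attr attr;
 *      int fd;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.type     = PERF_TYPE_HARDWARE;
 *      attr.size     = sizeof(attr);
 *      attr.config   = PERF_COUNT_HW_INSTRUCTIONS;
 *      attr.disabled = 1;      // enabled later via ioctl, see below
 *
 *      // count instructions of the calling task, on any CPU:
 *      fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */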

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE           _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE          _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH          _IO ('$', 2)
#define PERF_EVENT_IOC_RESET            _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD           _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT       _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER       _IOW('$', 6, char *)

enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP             = 1U << 0,
};
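
/*
 * Continuing the sketch above, a reset-enable-disable-read sequence
 * around a region of interest (error handling again omitted):
 *
 *      __u64 count;
 *
 *      ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *      // ... code under measurement ...
 *      ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *      read(fd, &count, sizeof(count));
 *
 * Passing PERF_IOC_FLAG_GROUP as the ioctl argument applies the
 * operation to the whole event group rather than a single event.
 */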

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
        __u32   version;                /* version number of this structure */
        __u32   compat_version;         /* lowest version this is compat with */

        /*
         * Bits needed to read the hw events in user-space.
         *
         *   u32 seq;
         *   s64 count;
         *
         *   do {
         *     seq = pc->lock;
         *
         *     barrier();
         *     if (pc->index) {
         *       count = pmc_read(pc->index - 1);
         *       count += pc->offset;
         *     } else
         *       goto regular_read;
         *
         *     barrier();
         *   } while (pc->lock != seq);
         *
         * NOTE: for obvious reasons this only works on self-monitoring
         *       processes.
         */
        __u32   lock;                   /* seqlock for synchronization */
        __u32   index;                  /* hardware event identifier */
        __s64   offset;                 /* add to hardware event value */
        __u64   time_enabled;           /* time event active */
        __u64   time_running;           /* time event on cpu */

        /*
         * Hole for extension of the self monitor capabilities
         */

        __u64   __reserved[123];        /* align to 1k */

        /*
         * Control data for the mmap() data buffer.
         *
         * User-space reading the @data_head value should issue an rmb(), on
         * SMP capable platforms, after reading this value -- see
         * perf_event_wakeup().
         *
         * When the mapping is PROT_WRITE, the @data_tail value should be
         * written by userspace to reflect the last read data. In this case
         * the kernel will not over-write unread data.
         */
        __u64   data_head;              /* head in the data section */
        __u64   data_tail;              /* user-space written tail */
};
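
/*
 * A sketch of the self-monitoring read loop described above, assuming
 * x86 (the RDPMC instruction) and an event opened by the current
 * task; other architectures need their own pmc_read(). Illustrative
 * only, not part of this header:
 *
 *      static __u64 pmc_read(__u32 idx)
 *      {
 *              __u32 lo, hi;
 *
 *              asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (idx));
 *              return lo | ((__u64)hi << 32);
 *      }
 *
 *      static int self_read(volatile struct perf_event_mmap_page *pc,
 *                           __s64 *count)
 *      {
 *              __u32 seq;
 *
 *              do {
 *                      seq = pc->lock;
 *                      __sync_synchronize();           // barrier()
 *                      if (!pc->index)
 *                              return -1;              // fall back to read()
 *                      *count = (__s64)pmc_read(pc->index - 1) + pc->offset;
 *                      __sync_synchronize();           // barrier()
 *              } while (pc->lock != seq);
 *
 *              return 0;
 *      }
 */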

#define PERF_RECORD_MISC_CPUMODE_MASK           (3 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN        (0 << 0)
#define PERF_RECORD_MISC_KERNEL                 (1 << 0)
#define PERF_RECORD_MISC_USER                   (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR             (3 << 0)

struct perf_event_header {
        __u32   type;
        __u16   misc;
        __u16   size;
};

enum perf_event_type {

        /*
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate userspace IPs to code. They have the following structure:
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      u64                             addr;
         *      u64                             len;
         *      u64                             pgoff;
         *      char                            filename[];
         * };
         */
        PERF_RECORD_MMAP                = 1,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             id;
         *      u64                             lost;
         * };
         */
        PERF_RECORD_LOST                = 2,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      char                            comm[];
         * };
         */
        PERF_RECORD_COMM                = 3,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         * };
         */
        PERF_RECORD_EXIT                = 4,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             time;
         *      u64                             id;
         *      u64                             stream_id;
         * };
         */
        PERF_RECORD_THROTTLE            = 5,
        PERF_RECORD_UNTHROTTLE          = 6,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         * };
         */
        PERF_RECORD_FORK                = 7,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, tid;
         *
         *      struct read_format              values;
         * };
         */
        PERF_RECORD_READ                = 8,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      { u64                   ip;       } && PERF_SAMPLE_IP
         *      { u32                   pid, tid; } && PERF_SAMPLE_TID
         *      { u64                   time;     } && PERF_SAMPLE_TIME
         *      { u64                   addr;     } && PERF_SAMPLE_ADDR
         *      { u64                   id;       } && PERF_SAMPLE_ID
         *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
         *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
         *      { u64                   period;   } && PERF_SAMPLE_PERIOD
         *
         *      { struct read_format    values;   } && PERF_SAMPLE_READ
         *
         *      { u64                   nr,
         *        u64                   ips[nr];  } && PERF_SAMPLE_CALLCHAIN
         *
         *      #
         *      # The RAW record below is opaque data wrt the ABI
         *      #
         *      # That is, the ABI doesn't make any promises wrt the
         *      # stability of its content, it may vary depending on
         *      # event, hardware, kernel version and phase of the moon.
         *      #
         *      # In other words, PERF_SAMPLE_RAW contents are not an ABI.
         *      #
         *
         *      { u32                   size;
         *        char                  data[size]; } && PERF_SAMPLE_RAW
         * };
         */
        PERF_RECORD_SAMPLE              = 9,

        PERF_RECORD_MAX,                /* non-ABI */
};
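
/*
 * A sketch of draining complete records from the mmap()ed buffer
 * (illustrative, not part of this header). 'pc' is the mapped
 * perf_event_mmap_page followed by 'size' bytes of data pages, where
 * 'size' is a power of two and 'page_size' is the system page size;
 * records wrapping past the end of the buffer would need an extra
 * copy, omitted here:
 *
 *      __u64 head = pc->data_head;
 *      __u64 tail = pc->data_tail;
 *      char *data = (char *)pc + page_size;
 *
 *      __sync_synchronize();                   // the rmb() noted above
 *
 *      while (tail < head) {
 *              struct perf_event_header *hdr =
 *                      (void *)(data + (tail & (size - 1)));
 *
 *              if (hdr->type == PERF_RECORD_SAMPLE)
 *                      ;                       // parse per attr.sample_type
 *              tail += hdr->size;
 *      }
 *
 *      pc->data_tail = tail;                   // mark the space consumed
 */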

enum perf_callchain_context {
        PERF_CONTEXT_HV                 = (__u64)-32,
        PERF_CONTEXT_KERNEL             = (__u64)-128,
        PERF_CONTEXT_USER               = (__u64)-512,

        PERF_CONTEXT_GUEST              = (__u64)-2048,
        PERF_CONTEXT_GUEST_KERNEL       = (__u64)-2176,
        PERF_CONTEXT_GUEST_USER         = (__u64)-2560,

        PERF_CONTEXT_MAX                = (__u64)-4095,
};

#define PERF_FLAG_FD_NO_GROUP   (1U << 0)
#define PERF_FLAG_FD_OUTPUT     (1U << 1)

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>

#define PERF_MAX_STACK_DEPTH            255

struct perf_callchain_entry {
        __u64                           nr;
        __u64                           ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
        u32                             size;
        void                            *data;
};

struct task_struct;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
        union {
                struct { /* hardware */
                        u64             config;
                        unsigned long   config_base;
                        unsigned long   event_base;
                        int             idx;
                };
                struct { /* software */
                        s64             remaining;
                        struct hrtimer  hrtimer;
                };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
                union { /* breakpoint */
                        struct arch_hw_breakpoint       info;
                };
#endif
        };
        atomic64_t                      prev_count;
        u64                             sample_period;
        u64                             last_period;
        atomic64_t                      period_left;
        u64                             interrupts;

        u64                             freq_count;
        u64                             freq_interrupts;
        u64                             freq_stamp;
#endif
};

struct perf_event;

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
        int (*enable)           (struct perf_event *event);
        void (*disable)         (struct perf_event *event);
        void (*read)            (struct perf_event *event);
        void (*unthrottle)      (struct perf_event *event);
};
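
/*
 * A sketch of how a pmu backend plugs in (names hypothetical, not
 * from the tree): enable() schedules the event onto hardware and
 * returns 0 on success, disable() takes it off, read() folds the
 * current hardware count into the event, and unthrottle() resumes
 * an event that overflow throttling paused:
 *
 *      static int dummy_enable(struct perf_event *event)
 *      {
 *              // program the counter described by event->hw
 *              return 0;
 *      }
 *
 *      static void dummy_disable(struct perf_event *event)
 *      {
 *              // stop the counter and accumulate its count
 *      }
 *
 *      static void dummy_read(struct perf_event *event)
 *      {
 *              // update event->count from the hardware
 *      }
 *
 *      static const struct pmu dummy_pmu = {
 *              .enable         = dummy_enable,
 *              .disable        = dummy_disable,
 *              .read           = dummy_read,
 *              .unthrottle     = dummy_enable,         // resume counting
 *      };
 *
 * hw_perf_event_init() (declared below) returns such a struct for a
 * newly created event.
 */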

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
        PERF_EVENT_STATE_INACTIVE       =  0,
        PERF_EVENT_STATE_ACTIVE         =  1,
};

struct file;

struct perf_mmap_data {
        struct rcu_head                 rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
#endif
        int                             data_order;
        int                             nr_pages;       /* nr of data pages */
        int                             writable;       /* are we writable */
        int                             nr_locked;      /* nr pages mlocked */

        atomic_t                        poll;           /* POLL_ for wakeups */
        atomic_t                        events;         /* event_id limit */

        atomic_long_t                   head;           /* write position */
        atomic_long_t                   done_head;      /* completed head */

        atomic_t                        lock;           /* concurrent writes */
        atomic_t                        wakeup;         /* needs a wakeup */
        atomic_t                        lost;           /* nr records lost */

        long                            watermark;      /* wakeup watermark */

        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
};

struct perf_pending_entry {
        struct perf_pending_entry *next;
        void (*func)(struct perf_pending_entry *);
};

struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
                                        struct perf_sample_data *,
                                        struct pt_regs *regs);

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
        struct list_head                group_entry;
        struct list_head                event_entry;
        struct list_head                sibling_list;
        int                             nr_siblings;
        struct perf_event               *group_leader;
        struct perf_event               *output;
        const struct pmu                *pmu;

        enum perf_event_active_state    state;
        atomic64_t                      count;

        /*
         * These are the total time in nanoseconds that the event
         * has been enabled (i.e. eligible to run, and the task has
         * been scheduled in, if this is a per-task event)
         * and running (scheduled onto the CPU), respectively.
         *
         * They are computed from tstamp_enabled, tstamp_running and
         * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
         */
        u64                             total_time_enabled;
        u64                             total_time_running;

        /*
         * These are timestamps used for computing total_time_enabled
         * and total_time_running when the event is in INACTIVE or
         * ACTIVE state, measured in nanoseconds from an arbitrary point
         * in time.
         * tstamp_enabled: the notional time when the event was enabled
         * tstamp_running: the notional time when the event was scheduled on
         * tstamp_stopped: in INACTIVE state, the notional time when the
         *      event was scheduled off.
         */
        u64                             tstamp_enabled;
        u64                             tstamp_running;
        u64                             tstamp_stopped;

        struct perf_event_attr          attr;
        struct hw_perf_event            hw;

        struct perf_event_context      *ctx;
        struct file                     *filp;

        /*
         * These accumulate total time (in nanoseconds) that children
         * events have been enabled and running, respectively.
         */
        atomic64_t                      child_total_time_enabled;
        atomic64_t                      child_total_time_running;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex                    child_mutex;
        struct list_head                child_list;
        struct perf_event               *parent;

        int                             oncpu;
        int                             cpu;

        struct list_head                owner_entry;
        struct task_struct              *owner;

        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;
        struct perf_mmap_data           *data;

        /* poll related */
        wait_queue_head_t               waitq;
        struct fasync_struct            *fasync;

        /* delayed work for NMIs and such */
        int                             pending_wakeup;
        int                             pending_kill;
        int                             pending_disable;
        struct perf_pending_entry       pending;

        atomic_t                        event_limit;

        void (*destroy)(struct perf_event *);
        struct rcu_head                 rcu_head;

        struct pid_namespace            *ns;
        u64                             id;

        perf_overflow_handler_t         overflow_handler;

#ifdef CONFIG_EVENT_PROFILE
        struct event_filter             *filter;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
         */
        raw_spinlock_t                  lock;
        /*
         * Protect the list of events. Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the spinlock.
         */
        struct mutex                    mutex;

        struct list_head                group_list;
        struct list_head                event_list;
        int                             nr_events;
        int                             nr_active;
        int                             is_active;
        int                             nr_stat;
        atomic_t                        refcount;
        struct task_struct              *task;

        /*
         * Context clock, runs when context enabled.
         */
        u64                             time;
        u64                             timestamp;

        /*
         * These fields let us detect when two contexts have both
         * been cloned (inherited) from a common ancestor.
         */
        struct perf_event_context       *parent_ctx;
        u64                             parent_gen;
        u64                             generation;
        int                             pin_count;
        struct rcu_head                 rcu_head;
};

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
        struct perf_event_context       ctx;
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
        int                             max_pertask;
        int                             exclusive;

        /*
         * Recursion avoidance:
         *
         * task, softirq, irq, nmi context
         */
        int                             recursion[4];
};

struct perf_output_handle {
        struct perf_event               *event;
        struct perf_mmap_data           *data;
        unsigned long                   head;
        unsigned long                   offset;
        int                             nmi;
        int                             sample;
        int                             locked;
};

#ifdef CONFIG_PERF_EVENTS

/*
 * Set by architecture code:
 */
extern int perf_max_events;

extern const struct pmu *hw_perf_event_init(struct perf_event *event);

extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
extern void perf_event_task_sched_out(struct task_struct *task,
                                      struct task_struct *next, int cpu);
extern void perf_event_task_tick(struct task_struct *task, int cpu);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void set_perf_event_pending(void);
extern void perf_event_do_pending(void);
extern void perf_event_print_debug(void);
extern void __perf_disable(void);
extern bool __perf_enable(void);
extern void perf_disable(void);
extern void perf_enable(void);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_event *group_leader,
                                  struct perf_cpu_context *cpuctx,
                                  struct perf_event_context *ctx, int cpu);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                 int cpu,
                                 pid_t pid,
                                 perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
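
/*
 * A sketch of in-kernel usage (values illustrative): create a counter
 * for the current task counting CPU cycles, with an overflow handler
 * invoked every sample_period events; release it again with
 * perf_event_release_kernel():
 *
 *      static void my_overflow(struct perf_event *event, int nmi,
 *                              struct perf_sample_data *data,
 *                              struct pt_regs *regs)
 *      {
 *              // runs on each counter overflow
 *      }
 *
 *      struct perf_event_attr attr = {
 *              .type           = PERF_TYPE_HARDWARE,
 *              .config         = PERF_COUNT_HW_CPU_CYCLES,
 *              .size           = sizeof(attr),
 *              .sample_period  = 1000000,
 *      };
 *      struct perf_event *event =
 *              perf_event_create_kernel_counter(&attr, -1, current->pid,
 *                                               my_overflow);
 */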

struct perf_sample_data {
        u64                             type;

        u64                             ip;
        struct {
                u32     pid;
                u32     tid;
        }                               tid_entry;
        u64                             time;
        u64                             addr;
        u64                             id;
        u64                             stream_id;
        struct {
                u32     cpu;
                u32     reserved;
        }                               cpu_entry;
        u64                             period;
        struct perf_callchain_entry     *callchain;
        struct perf_raw_record          *raw;
};

extern void perf_output_sample(struct perf_output_handle *handle,
                               struct perf_event_header *header,
                               struct perf_sample_data *data,
                               struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
                                struct perf_sample_data *data,
                                struct perf_event *event,
                                struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event, int nmi,
                               struct perf_sample_data *data,
                               struct pt_regs *regs);

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
        switch (event->attr.type) {
        case PERF_TYPE_SOFTWARE:
        case PERF_TYPE_TRACEPOINT:
        /* for now the breakpoint stuff also works as software event */
        case PERF_TYPE_BREAKPOINT:
                return 1;
        }
        return 0;
}

extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
        if (atomic_read(&perf_swevent_enabled[event_id]))
                __perf_sw_event(event_id, nr, nmi, regs, addr);
}
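
/*
 * The test above makes disabled software events nearly free: a call
 * site pays only an atomic_read() until somebody actually creates an
 * event of that type. An illustrative call site (not a quote from
 * the tree):
 *
 *      perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 */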

extern void __perf_event_mmap(struct vm_area_struct *vma);

static inline void perf_event_mmap(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_EXEC)
                __perf_event_mmap(vma);
}

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern void perf_event_init(void);
extern void perf_tp_event(int event_id, u64 addr, u64 count,
                          void *record, int entry_size);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
#define perf_misc_flags(regs)   (user_mode(regs) ? PERF_RECORD_MISC_USER : \
                                 PERF_RECORD_MISC_KERNEL)
#define perf_instruction_pointer(regs)  instruction_pointer(regs)
#endif

extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size,
                             int nmi, int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
                             const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task, int cpu)             { }
static inline void
perf_event_task_sched_out(struct task_struct *task,
                          struct task_struct *next, int cpu)            { }
static inline void
perf_event_task_tick(struct task_struct *task, int cpu)                 { }
static inline int  perf_event_init_task(struct task_struct *child)     { return 0; }
static inline void perf_event_exit_task(struct task_struct *child)     { }
static inline void perf_event_free_task(struct task_struct *task)      { }
static inline void perf_event_do_pending(void)                         { }
static inline void perf_event_print_debug(void)                        { }
static inline void perf_disable(void)                                  { }
static inline void perf_enable(void)                                   { }
static inline int  perf_event_task_disable(void)                       { return -EINVAL; }
static inline int  perf_event_task_enable(void)                        { return -EINVAL; }

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
              struct pt_regs *regs, u64 addr)                           { }
static inline void
perf_bp_event(struct perf_event *event, void *data)                    { }

static inline void perf_event_mmap(struct vm_area_struct *vma)         { }
static inline void perf_event_comm(struct task_struct *tsk)            { }
static inline void perf_event_fork(struct task_struct *tsk)            { }
static inline void perf_event_init(void)                               { }
static inline int  perf_swevent_get_recursion_context(void)            { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)        { }
static inline void perf_event_enable(struct perf_event *event)         { }
static inline void perf_event_disable(struct perf_event *event)        { }
#endif

#define perf_output_put(handle, x) \
        perf_output_copy((handle), &(x), sizeof(x))

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */