perf, x86: Implement user-space RDPMC support, to allow fast, user-space access to...
1 /*
2 * Performance events:
3 *
4 * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
7 *
8 * Data type definitions, declarations, prototypes.
9 *
10 * Started by: Thomas Gleixner and Ingo Molnar
11 *
12 * For licencing details see kernel-base/COPYING
13 */
14 #ifndef _LINUX_PERF_EVENT_H
15 #define _LINUX_PERF_EVENT_H
16
17 #include <linux/types.h>
18 #include <linux/ioctl.h>
19 #include <asm/byteorder.h>
20
21 /*
22 * User-space ABI bits:
23 */
24
25 /*
26 * attr.type
27 */
28 enum perf_type_id {
29 PERF_TYPE_HARDWARE = 0,
30 PERF_TYPE_SOFTWARE = 1,
31 PERF_TYPE_TRACEPOINT = 2,
32 PERF_TYPE_HW_CACHE = 3,
33 PERF_TYPE_RAW = 4,
34 PERF_TYPE_BREAKPOINT = 5,
35
36 PERF_TYPE_MAX, /* non-ABI */
37 };
38
39 /*
40 * Generalized performance event event_id types, used by the
41  * attr.config parameter of the sys_perf_event_open()
42 * syscall:
43 */
44 enum perf_hw_id {
45 /*
46 * Common hardware events, generalized by the kernel:
47 */
48 PERF_COUNT_HW_CPU_CYCLES = 0,
49 PERF_COUNT_HW_INSTRUCTIONS = 1,
50 PERF_COUNT_HW_CACHE_REFERENCES = 2,
51 PERF_COUNT_HW_CACHE_MISSES = 3,
52 PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
53 PERF_COUNT_HW_BRANCH_MISSES = 5,
54 PERF_COUNT_HW_BUS_CYCLES = 6,
55 PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
56 PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
57 PERF_COUNT_HW_REF_CPU_CYCLES = 9,
58
59 PERF_COUNT_HW_MAX, /* non-ABI */
60 };
61
62 /*
63 * Generalized hardware cache events:
64 *
65 * { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
66 * { read, write, prefetch } x
67 * { accesses, misses }
68 */
69 enum perf_hw_cache_id {
70 PERF_COUNT_HW_CACHE_L1D = 0,
71 PERF_COUNT_HW_CACHE_L1I = 1,
72 PERF_COUNT_HW_CACHE_LL = 2,
73 PERF_COUNT_HW_CACHE_DTLB = 3,
74 PERF_COUNT_HW_CACHE_ITLB = 4,
75 PERF_COUNT_HW_CACHE_BPU = 5,
76 PERF_COUNT_HW_CACHE_NODE = 6,
77
78 PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
79 };
80
81 enum perf_hw_cache_op_id {
82 PERF_COUNT_HW_CACHE_OP_READ = 0,
83 PERF_COUNT_HW_CACHE_OP_WRITE = 1,
84 PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
85
86 PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
87 };
88
89 enum perf_hw_cache_op_result_id {
90 PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
91 PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
92
93 PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
94 };
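/*
 * For PERF_TYPE_HW_CACHE events the three enums above are combined into
 * attr.config. A minimal sketch of the conventional encoding (this is the
 * layout documented in perf_event_open(2) and used by the perf tools; the
 * helper name is illustrative only):
 *
 *	static __u64 hw_cache_config(enum perf_hw_cache_id cache,
 *				     enum perf_hw_cache_op_id op,
 *				     enum perf_hw_cache_op_result_id result)
 *	{
 *		return (__u64)cache | ((__u64)op << 8) | ((__u64)result << 16);
 *	}
 *
 * e.g. L1D read misses:
 *
 *	hw_cache_config(PERF_COUNT_HW_CACHE_L1D,
 *			PERF_COUNT_HW_CACHE_OP_READ,
 *			PERF_COUNT_HW_CACHE_RESULT_MISS);
 */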
95
96 /*
97 * Special "software" events provided by the kernel, even if the hardware
98  * does not support performance events. These events count various
99  * physical and software events of the kernel (and allow profiling
100  * them as well):
101 */
102 enum perf_sw_ids {
103 PERF_COUNT_SW_CPU_CLOCK = 0,
104 PERF_COUNT_SW_TASK_CLOCK = 1,
105 PERF_COUNT_SW_PAGE_FAULTS = 2,
106 PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
107 PERF_COUNT_SW_CPU_MIGRATIONS = 4,
108 PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
109 PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
110 PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
111 PERF_COUNT_SW_EMULATION_FAULTS = 8,
112
113 PERF_COUNT_SW_MAX, /* non-ABI */
114 };
115
116 /*
117 * Bits that can be set in attr.sample_type to request information
118 * in the overflow packets.
119 */
120 enum perf_event_sample_format {
121 PERF_SAMPLE_IP = 1U << 0,
122 PERF_SAMPLE_TID = 1U << 1,
123 PERF_SAMPLE_TIME = 1U << 2,
124 PERF_SAMPLE_ADDR = 1U << 3,
125 PERF_SAMPLE_READ = 1U << 4,
126 PERF_SAMPLE_CALLCHAIN = 1U << 5,
127 PERF_SAMPLE_ID = 1U << 6,
128 PERF_SAMPLE_CPU = 1U << 7,
129 PERF_SAMPLE_PERIOD = 1U << 8,
130 PERF_SAMPLE_STREAM_ID = 1U << 9,
131 PERF_SAMPLE_RAW = 1U << 10,
132
133 PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
134 };
135
136 /*
137 * The format of the data returned by read() on a perf event fd,
138 * as specified by attr.read_format:
139 *
140 * struct read_format {
141 * { u64 value;
142 * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
143 * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
144 * { u64 id; } && PERF_FORMAT_ID
145 * } && !PERF_FORMAT_GROUP
146 *
147 * { u64 nr;
148 * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
149 * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
150 * { u64 value;
151 * { u64 id; } && PERF_FORMAT_ID
152 * } cntr[nr];
153 * } && PERF_FORMAT_GROUP
154 * };
155 */
156 enum perf_event_read_format {
157 PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
158 PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
159 PERF_FORMAT_ID = 1U << 2,
160 PERF_FORMAT_GROUP = 1U << 3,
161
162 PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
163 };
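/*
 * A minimal user-space sketch of consuming the !PERF_FORMAT_GROUP layout
 * above, assuming the event was opened with read_format set to
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING
 * (struct and variable names are illustrative only):
 *
 *	struct read_value {
 *		__u64 value;
 *		__u64 time_enabled;
 *		__u64 time_running;
 *	} rv;
 *
 *	if (read(fd, &rv, sizeof(rv)) == sizeof(rv)) {
 *		// scale for multiplexing: the event only counted while
 *		// it was actually running on the PMU
 *		double scale = rv.time_running ?
 *			(double)rv.time_enabled / rv.time_running : 1.0;
 *		printf("count %llu (scaled %.0f)\n",
 *		       (unsigned long long)rv.value, rv.value * scale);
 *	}
 */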
164
165 #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
166
167 /*
168 * Hardware event_id to monitor via a performance monitoring event:
169 */
170 struct perf_event_attr {
171
172 /*
173 * Major type: hardware/software/tracepoint/etc.
174 */
175 __u32 type;
176
177 /*
178 * Size of the attr structure, for fwd/bwd compat.
179 */
180 __u32 size;
181
182 /*
183 * Type specific configuration information.
184 */
185 __u64 config;
186
187 union {
188 __u64 sample_period;
189 __u64 sample_freq;
190 };
191
192 __u64 sample_type;
193 __u64 read_format;
194
195 __u64 disabled : 1, /* off by default */
196 inherit : 1, /* children inherit it */
197 pinned : 1, /* must always be on PMU */
198 exclusive : 1, /* only group on PMU */
199 exclude_user : 1, /* don't count user */
200 exclude_kernel : 1, /* ditto kernel */
201 exclude_hv : 1, /* ditto hypervisor */
202 exclude_idle : 1, /* don't count when idle */
203 mmap : 1, /* include mmap data */
204 comm : 1, /* include comm data */
205 freq : 1, /* use freq, not period */
206 inherit_stat : 1, /* per task counts */
207 enable_on_exec : 1, /* next exec enables */
208 task : 1, /* trace fork/exit */
209 watermark : 1, /* wakeup_watermark */
210 /*
211 * precise_ip:
212 *
213 * 0 - SAMPLE_IP can have arbitrary skid
214 * 1 - SAMPLE_IP must have constant skid
215 * 2 - SAMPLE_IP requested to have 0 skid
216 * 3 - SAMPLE_IP must have 0 skid
217 *
218 * See also PERF_RECORD_MISC_EXACT_IP
219 */
220 precise_ip : 2, /* skid constraint */
221 mmap_data : 1, /* non-exec mmap data */
222 sample_id_all : 1, /* sample_type all events */
223
224 exclude_host : 1, /* don't count in host */
225 exclude_guest : 1, /* don't count in guest */
226
227 __reserved_1 : 43;
228
229 union {
230 __u32 wakeup_events; /* wakeup every n events */
231 __u32 wakeup_watermark; /* bytes before wakeup */
232 };
233
234 __u32 bp_type;
235 union {
236 __u64 bp_addr;
237 __u64 config1; /* extension of config */
238 };
239 union {
240 __u64 bp_len;
241 __u64 config2; /* extension of config1 */
242 };
243 };
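/*
 * A minimal user-space sketch of filling in perf_event_attr and opening a
 * counter with the raw syscall (there is no glibc wrapper); error handling
 * is omitted and the local names are illustrative only:
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	struct perf_event_attr attr;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type = PERF_TYPE_HARDWARE;
 *	attr.size = sizeof(attr);
 *	attr.config = PERF_COUNT_HW_CPU_CYCLES;
 *	attr.disabled = 1;
 *	attr.exclude_kernel = 1;
 *	attr.exclude_hv = 1;
 *
 *	// measure the calling thread (pid 0) on any CPU (-1),
 *	// no group leader (-1), no flags
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */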
244
245 /*
246 * Ioctls that can be done on a perf event fd:
247 */
248 #define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
249 #define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
250 #define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
251 #define PERF_EVENT_IOC_RESET _IO ('$', 3)
252 #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
253 #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
254 #define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
255
256 enum perf_event_ioc_flags {
257 PERF_IOC_FLAG_GROUP = 1U << 0,
258 };
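/*
 * A typical self-profiling sequence built on the ioctls above (a sketch
 * only; 'fd' is a perf event fd opened with attr.disabled = 1 and 'count'
 * matches the layout selected by attr.read_format):
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *
 *	run_workload();
 *
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *
 * Passing PERF_IOC_FLAG_GROUP as the ioctl argument applies the operation
 * to the whole event group instead of a single event.
 */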
259
260 /*
261 * Structure of the page that can be mapped via mmap
262 */
263 struct perf_event_mmap_page {
264 __u32 version; /* version number of this structure */
265 __u32 compat_version; /* lowest version this is compat with */
266
267 /*
268 * Bits needed to read the hw events in user-space.
269 *
270 * u32 seq;
271 * s64 count;
272 *
273 * do {
274 * seq = pc->lock;
275 *
276 * barrier()
277 * if (pc->index) {
278 * count = pmc_read(pc->index - 1);
279 * count += pc->offset;
280 * } else
281 * goto regular_read;
282 *
283 * barrier();
284 * } while (pc->lock != seq);
285 *
286  * NOTE: for obvious reasons this only works for self-monitoring
287 * processes.
288 */
289 __u32 lock; /* seqlock for synchronization */
290 __u32 index; /* hardware event identifier */
291 __s64 offset; /* add to hardware event value */
292 __u64 time_enabled; /* time event active */
293 __u64 time_running; /* time event on cpu */
294
295 /*
296 * Hole for extension of the self monitor capabilities
297 */
298
299 __u64 __reserved[123]; /* align to 1k */
300
301 /*
302 * Control data for the mmap() data buffer.
303 *
304 * User-space reading the @data_head value should issue an rmb(), on
305 * SMP capable platforms, after reading this value -- see
306 * perf_event_wakeup().
307 *
308 * When the mapping is PROT_WRITE the @data_tail value should be
309 * written by userspace to reflect the last read data. In this case
310 * the kernel will not over-write unread data.
311 */
312 __u64 data_head; /* head in the data section */
313 __u64 data_tail; /* user-space written tail */
314 };
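/*
 * On x86 the pmc_read() step of the loop documented above can be done
 * entirely in user space with the RDPMC instruction. A sketch of how a
 * self-monitoring process might implement it, assuming the event is
 * currently scheduled on a hardware counter (pc->index != 0); a complete
 * implementation would also mask the result to the counter width:
 *
 *	static inline __u64 pmc_read(__u32 idx)
 *	{
 *		__u32 lo, hi;
 *
 *		asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (idx));
 *		return (__u64)hi << 32 | lo;
 *	}
 *
 * called as pmc_read(pc->index - 1) from within the seqlock retry loop.
 */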
315
316 #define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0)
317 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
318 #define PERF_RECORD_MISC_KERNEL (1 << 0)
319 #define PERF_RECORD_MISC_USER (2 << 0)
320 #define PERF_RECORD_MISC_HYPERVISOR (3 << 0)
321 #define PERF_RECORD_MISC_GUEST_KERNEL (4 << 0)
322 #define PERF_RECORD_MISC_GUEST_USER (5 << 0)
323
324 /*
325 * Indicates that the content of PERF_SAMPLE_IP points to
326 * the actual instruction that triggered the event. See also
327 * perf_event_attr::precise_ip.
328 */
329 #define PERF_RECORD_MISC_EXACT_IP (1 << 14)
330 /*
331 * Reserve the last bit to indicate some extended misc field
332 */
333 #define PERF_RECORD_MISC_EXT_RESERVED (1 << 15)
334
335 struct perf_event_header {
336 __u32 type;
337 __u16 misc;
338 __u16 size;
339 };
340
341 enum perf_event_type {
342
343 /*
344  * If perf_event_attr.sample_id_all is set then all event types will
345  * carry the sample_type-selected fields that describe where/when
346  * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID), as
347  * described for PERF_RECORD_SAMPLE below. They are stashed just after
348  * the perf_event_header and the fields already present for the
349  * existing record type, i.e. at the end of the payload. That way a
350  * newer perf.data file will be supported by older perf tools, with
351  * these new optional fields being ignored.
352 *
353 * The MMAP events record the PROT_EXEC mappings so that we can
354 * correlate userspace IPs to code. They have the following structure:
355 *
356 * struct {
357 * struct perf_event_header header;
358 *
359 * u32 pid, tid;
360 * u64 addr;
361 * u64 len;
362 * u64 pgoff;
363 * char filename[];
364 * };
365 */
366 PERF_RECORD_MMAP = 1,
367
368 /*
369 * struct {
370 * struct perf_event_header header;
371 * u64 id;
372 * u64 lost;
373 * };
374 */
375 PERF_RECORD_LOST = 2,
376
377 /*
378 * struct {
379 * struct perf_event_header header;
380 *
381 * u32 pid, tid;
382 * char comm[];
383 * };
384 */
385 PERF_RECORD_COMM = 3,
386
387 /*
388 * struct {
389 * struct perf_event_header header;
390 * u32 pid, ppid;
391 * u32 tid, ptid;
392 * u64 time;
393 * };
394 */
395 PERF_RECORD_EXIT = 4,
396
397 /*
398 * struct {
399 * struct perf_event_header header;
400 * u64 time;
401 * u64 id;
402 * u64 stream_id;
403 * };
404 */
405 PERF_RECORD_THROTTLE = 5,
406 PERF_RECORD_UNTHROTTLE = 6,
407
408 /*
409 * struct {
410 * struct perf_event_header header;
411 * u32 pid, ppid;
412 * u32 tid, ptid;
413 * u64 time;
414 * };
415 */
416 PERF_RECORD_FORK = 7,
417
418 /*
419 * struct {
420 * struct perf_event_header header;
421 * u32 pid, tid;
422 *
423 * struct read_format values;
424 * };
425 */
426 PERF_RECORD_READ = 8,
427
428 /*
429 * struct {
430 * struct perf_event_header header;
431 *
432 * { u64 ip; } && PERF_SAMPLE_IP
433 * { u32 pid, tid; } && PERF_SAMPLE_TID
434 * { u64 time; } && PERF_SAMPLE_TIME
435 * { u64 addr; } && PERF_SAMPLE_ADDR
436 * { u64 id; } && PERF_SAMPLE_ID
437 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
438 * { u32 cpu, res; } && PERF_SAMPLE_CPU
439 * { u64 period; } && PERF_SAMPLE_PERIOD
440 *
441 * { struct read_format values; } && PERF_SAMPLE_READ
442 *
443 * { u64 nr,
444 * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
445 *
446 * #
447 * # The RAW record below is opaque data wrt the ABI
448 * #
449  * # That is, the ABI doesn't make any promises wrt
450  * # the stability of its content; it may vary depending
451 * # on event, hardware, kernel version and phase of
452 * # the moon.
453 * #
454 * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
455 * #
456 *
457 * { u32 size;
458 * char data[size];}&& PERF_SAMPLE_RAW
459 * };
460 */
461 PERF_RECORD_SAMPLE = 9,
462
463 PERF_RECORD_MAX, /* non-ABI */
464 };
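/*
 * A minimal user-space sketch of draining records from the mmap()ed data
 * area (see struct perf_event_mmap_page above). It assumes a PROT_WRITE
 * mapping so data_tail may be updated, ignores records that wrap around
 * the end of the buffer, and consume_record() is an illustrative
 * placeholder:
 *
 *	static void drain(struct perf_event_mmap_page *page,
 *			  char *data, __u64 data_size)
 *	{
 *		__u64 head = page->data_head;
 *
 *		rmb();	// pairs with the kernel barrier, see data_head above
 *
 *		while (page->data_tail != head) {
 *			struct perf_event_header *hdr;
 *
 *			hdr = (void *)(data + (page->data_tail % data_size));
 *			// hdr->type is one of the PERF_RECORD_* values,
 *			// hdr->size covers the record including the header
 *			consume_record(hdr);
 *			page->data_tail += hdr->size;
 *		}
 *	}
 */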
465
466 enum perf_callchain_context {
467 PERF_CONTEXT_HV = (__u64)-32,
468 PERF_CONTEXT_KERNEL = (__u64)-128,
469 PERF_CONTEXT_USER = (__u64)-512,
470
471 PERF_CONTEXT_GUEST = (__u64)-2048,
472 PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
473 PERF_CONTEXT_GUEST_USER = (__u64)-2560,
474
475 PERF_CONTEXT_MAX = (__u64)-4095,
476 };
477
478 #define PERF_FLAG_FD_NO_GROUP (1U << 0)
479 #define PERF_FLAG_FD_OUTPUT (1U << 1)
480 #define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */
481
482 #ifdef __KERNEL__
483 /*
484 * Kernel-internal data types and definitions:
485 */
486
487 #ifdef CONFIG_PERF_EVENTS
488 # include <linux/cgroup.h>
489 # include <asm/perf_event.h>
490 # include <asm/local64.h>
491 #endif
492
493 struct perf_guest_info_callbacks {
494 int (*is_in_guest)(void);
495 int (*is_user_mode)(void);
496 unsigned long (*get_guest_ip)(void);
497 };
498
499 #ifdef CONFIG_HAVE_HW_BREAKPOINT
500 #include <asm/hw_breakpoint.h>
501 #endif
502
503 #include <linux/list.h>
504 #include <linux/mutex.h>
505 #include <linux/rculist.h>
506 #include <linux/rcupdate.h>
507 #include <linux/spinlock.h>
508 #include <linux/hrtimer.h>
509 #include <linux/fs.h>
510 #include <linux/pid_namespace.h>
511 #include <linux/workqueue.h>
512 #include <linux/ftrace.h>
513 #include <linux/cpu.h>
514 #include <linux/irq_work.h>
515 #include <linux/jump_label.h>
516 #include <linux/atomic.h>
517 #include <asm/local.h>
518
519 #define PERF_MAX_STACK_DEPTH 255
520
521 struct perf_callchain_entry {
522 __u64 nr;
523 __u64 ip[PERF_MAX_STACK_DEPTH];
524 };
525
526 struct perf_raw_record {
527 u32 size;
528 void *data;
529 };
530
531 struct perf_branch_entry {
532 __u64 from;
533 __u64 to;
534 __u64 flags;
535 };
536
537 struct perf_branch_stack {
538 __u64 nr;
539 struct perf_branch_entry entries[0];
540 };
541
542 struct task_struct;
543
544 /*
545 * extra PMU register associated with an event
546 */
547 struct hw_perf_event_extra {
548 u64 config; /* register value */
549 unsigned int reg; /* register address or index */
550 int alloc; /* extra register already allocated */
551 int idx; /* index in shared_regs->regs[] */
552 };
553
554 /**
555 * struct hw_perf_event - performance event hardware details:
556 */
557 struct hw_perf_event {
558 #ifdef CONFIG_PERF_EVENTS
559 union {
560 struct { /* hardware */
561 u64 config;
562 u64 last_tag;
563 unsigned long config_base;
564 unsigned long event_base;
565 int idx;
566 int last_cpu;
567 struct hw_perf_event_extra extra_reg;
568 };
569 struct { /* software */
570 struct hrtimer hrtimer;
571 };
572 #ifdef CONFIG_HAVE_HW_BREAKPOINT
573 struct { /* breakpoint */
574 struct arch_hw_breakpoint info;
575 struct list_head bp_list;
576 /*
577 * Crufty hack to avoid the chicken and egg
578 * problem hw_breakpoint has with context
579  * creation and event initialization.
580 */
581 struct task_struct *bp_target;
582 };
583 #endif
584 };
585 int state;
586 local64_t prev_count;
587 u64 sample_period;
588 u64 last_period;
589 local64_t period_left;
590 u64 interrupts;
591
592 u64 freq_time_stamp;
593 u64 freq_count_stamp;
594 #endif
595 };
596
597 /*
598 * hw_perf_event::state flags
599 */
600 #define PERF_HES_STOPPED 0x01 /* the counter is stopped */
601 #define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
602 #define PERF_HES_ARCH 0x04
603
604 struct perf_event;
605
606 /*
607 * Common implementation detail of pmu::{start,commit,cancel}_txn
608 */
609 #define PERF_EVENT_TXN 0x1
610
611 /**
612 * struct pmu - generic performance monitoring unit
613 */
614 struct pmu {
615 struct list_head entry;
616
617 struct device *dev;
618 char *name;
619 int type;
620
621 int * __percpu pmu_disable_count;
622 struct perf_cpu_context * __percpu pmu_cpu_context;
623 int task_ctx_nr;
624
625 /*
626 * Fully disable/enable this PMU, can be used to protect from the PMI
627 * as well as for lazy/batch writing of the MSRs.
628 */
629 void (*pmu_enable) (struct pmu *pmu); /* optional */
630 void (*pmu_disable) (struct pmu *pmu); /* optional */
631
632 /*
633 * Try and initialize the event for this PMU.
634 * Should return -ENOENT when the @event doesn't match this PMU.
635 */
636 int (*event_init) (struct perf_event *event);
637
638 #define PERF_EF_START 0x01 /* start the counter when adding */
639 #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
640 #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
641
642 /*
643 * Adds/Removes a counter to/from the PMU, can be done inside
644 * a transaction, see the ->*_txn() methods.
645 */
646 int (*add) (struct perf_event *event, int flags);
647 void (*del) (struct perf_event *event, int flags);
648
649 /*
650 * Starts/Stops a counter present on the PMU. The PMI handler
651 * should stop the counter when perf_event_overflow() returns
652 * !0. ->start() will be used to continue.
653 */
654 void (*start) (struct perf_event *event, int flags);
655 void (*stop) (struct perf_event *event, int flags);
656
657 /*
658 * Updates the counter value of the event.
659 */
660 void (*read) (struct perf_event *event);
661
662 /*
663  * Group event scheduling is treated as a transaction: add
664  * group events as a whole and perform one schedulability test.
665  * If the test fails, roll back the whole group.
666  *
667  * Start the transaction; after this, ->add() doesn't need to
668  * do schedulability tests.
669 */
670 void (*start_txn) (struct pmu *pmu); /* optional */
671 /*
672 * If ->start_txn() disabled the ->add() schedulability test
673 * then ->commit_txn() is required to perform one. On success
674 * the transaction is closed. On error the transaction is kept
675 * open until ->cancel_txn() is called.
676 */
677 int (*commit_txn) (struct pmu *pmu); /* optional */
678 /*
679 * Will cancel the transaction, assumes ->del() is called
680 * for each successful ->add() during the transaction.
681 */
682 void (*cancel_txn) (struct pmu *pmu); /* optional */
683
684 /*
685  * Will return the value for perf_event_mmap_page::index for this event;
686  * if no implementation is provided it will default to: event->hw.idx + 1.
687  */
688  int (*event_idx) (struct perf_event *event); /* optional */
689 };
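/*
 * A minimal sketch of an in-kernel PMU driver built on the ops above
 * (callback bodies elided, all names illustrative only); the driver
 * registers itself with perf_pmu_register(), declared further down:
 *
 *	static int my_event_init(struct perf_event *event)
 *	{
 *		// a real driver checks event->attr.type / attr.config here
 *		// and returns -ENOENT if the event is not for this PMU
 *		return 0;
 *	}
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,	// per-CPU only
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	perf_pmu_register(&my_pmu, "my_pmu", -1);
 */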
690
691 /**
692  * enum perf_event_active_state - the states of an event
693 */
694 enum perf_event_active_state {
695 PERF_EVENT_STATE_ERROR = -2,
696 PERF_EVENT_STATE_OFF = -1,
697 PERF_EVENT_STATE_INACTIVE = 0,
698 PERF_EVENT_STATE_ACTIVE = 1,
699 };
700
701 struct file;
702 struct perf_sample_data;
703
704 typedef void (*perf_overflow_handler_t)(struct perf_event *,
705 struct perf_sample_data *,
706 struct pt_regs *regs);
707
708 enum perf_group_flag {
709 PERF_GROUP_SOFTWARE = 0x1,
710 };
711
712 #define SWEVENT_HLIST_BITS 8
713 #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
714
715 struct swevent_hlist {
716 struct hlist_head heads[SWEVENT_HLIST_SIZE];
717 struct rcu_head rcu_head;
718 };
719
720 #define PERF_ATTACH_CONTEXT 0x01
721 #define PERF_ATTACH_GROUP 0x02
722 #define PERF_ATTACH_TASK 0x04
723
724 #ifdef CONFIG_CGROUP_PERF
725 /*
726 * perf_cgroup_info keeps track of time_enabled for a cgroup.
727 * This is a per-cpu dynamically allocated data structure.
728 */
729 struct perf_cgroup_info {
730 u64 time;
731 u64 timestamp;
732 };
733
734 struct perf_cgroup {
735 struct cgroup_subsys_state css;
736 struct perf_cgroup_info *info; /* timing info, one per cpu */
737 };
738 #endif
739
740 struct ring_buffer;
741
742 /**
743 * struct perf_event - performance event kernel representation:
744 */
745 struct perf_event {
746 #ifdef CONFIG_PERF_EVENTS
747 struct list_head group_entry;
748 struct list_head event_entry;
749 struct list_head sibling_list;
750 struct hlist_node hlist_entry;
751 int nr_siblings;
752 int group_flags;
753 struct perf_event *group_leader;
754 struct pmu *pmu;
755
756 enum perf_event_active_state state;
757 unsigned int attach_state;
758 local64_t count;
759 atomic64_t child_count;
760
761 /*
762 * These are the total time in nanoseconds that the event
763 * has been enabled (i.e. eligible to run, and the task has
764 * been scheduled in, if this is a per-task event)
765 * and running (scheduled onto the CPU), respectively.
766 *
767 * They are computed from tstamp_enabled, tstamp_running and
768 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
769 */
770 u64 total_time_enabled;
771 u64 total_time_running;
772
773 /*
774 * These are timestamps used for computing total_time_enabled
775 * and total_time_running when the event is in INACTIVE or
776 * ACTIVE state, measured in nanoseconds from an arbitrary point
777 * in time.
778 * tstamp_enabled: the notional time when the event was enabled
779 * tstamp_running: the notional time when the event was scheduled on
780 * tstamp_stopped: in INACTIVE state, the notional time when the
781 * event was scheduled off.
782 */
783 u64 tstamp_enabled;
784 u64 tstamp_running;
785 u64 tstamp_stopped;
786
787 /*
788 * timestamp shadows the actual context timing but it can
789 * be safely used in NMI interrupt context. It reflects the
790 * context time as it was when the event was last scheduled in.
791 *
792 * ctx_time already accounts for ctx->timestamp. Therefore to
793 * compute ctx_time for a sample, simply add perf_clock().
794 */
795 u64 shadow_ctx_time;
796
797 struct perf_event_attr attr;
798 u16 header_size;
799 u16 id_header_size;
800 u16 read_size;
801 struct hw_perf_event hw;
802
803 struct perf_event_context *ctx;
804 struct file *filp;
805
806 /*
807 * These accumulate total time (in nanoseconds) that children
808 * events have been enabled and running, respectively.
809 */
810 atomic64_t child_total_time_enabled;
811 atomic64_t child_total_time_running;
812
813 /*
814 * Protect attach/detach and child_list:
815 */
816 struct mutex child_mutex;
817 struct list_head child_list;
818 struct perf_event *parent;
819
820 int oncpu;
821 int cpu;
822
823 struct list_head owner_entry;
824 struct task_struct *owner;
825
826 /* mmap bits */
827 struct mutex mmap_mutex;
828 atomic_t mmap_count;
829 int mmap_locked;
830 struct user_struct *mmap_user;
831 struct ring_buffer *rb;
832 struct list_head rb_entry;
833
834 /* poll related */
835 wait_queue_head_t waitq;
836 struct fasync_struct *fasync;
837
838 /* delayed work for NMIs and such */
839 int pending_wakeup;
840 int pending_kill;
841 int pending_disable;
842 struct irq_work pending;
843
844 atomic_t event_limit;
845
846 void (*destroy)(struct perf_event *);
847 struct rcu_head rcu_head;
848
849 struct pid_namespace *ns;
850 u64 id;
851
852 perf_overflow_handler_t overflow_handler;
853 void *overflow_handler_context;
854
855 #ifdef CONFIG_EVENT_TRACING
856 struct ftrace_event_call *tp_event;
857 struct event_filter *filter;
858 #endif
859
860 #ifdef CONFIG_CGROUP_PERF
861  struct perf_cgroup *cgrp; /* cgroup this event is attached to */
862 int cgrp_defer_enabled;
863 #endif
864
865 #endif /* CONFIG_PERF_EVENTS */
866 };
867
868 enum perf_event_context_type {
869 task_context,
870 cpu_context,
871 };
872
873 /**
874 * struct perf_event_context - event context structure
875 *
876 * Used as a container for task events and CPU events as well:
877 */
878 struct perf_event_context {
879 struct pmu *pmu;
880 enum perf_event_context_type type;
881 /*
882 * Protect the states of the events in the list,
883 * nr_active, and the list:
884 */
885 raw_spinlock_t lock;
886 /*
887 * Protect the list of events. Locking either mutex or lock
888 * is sufficient to ensure the list doesn't change; to change
889 * the list you need to lock both the mutex and the spinlock.
890 */
891 struct mutex mutex;
892
893 struct list_head pinned_groups;
894 struct list_head flexible_groups;
895 struct list_head event_list;
896 int nr_events;
897 int nr_active;
898 int is_active;
899 int nr_stat;
900 int nr_freq;
901 int rotate_disable;
902 atomic_t refcount;
903 struct task_struct *task;
904
905 /*
906 * Context clock, runs when context enabled.
907 */
908 u64 time;
909 u64 timestamp;
910
911 /*
912 * These fields let us detect when two contexts have both
913 * been cloned (inherited) from a common ancestor.
914 */
915 struct perf_event_context *parent_ctx;
916 u64 parent_gen;
917 u64 generation;
918 int pin_count;
919 int nr_cgroups; /* cgroup events present */
920 struct rcu_head rcu_head;
921 };
922
923 /*
924 * Number of contexts where an event can trigger:
925 * task, softirq, hardirq, nmi.
926 */
927 #define PERF_NR_CONTEXTS 4
928
929 /**
930  * struct perf_cpu_context - per cpu event context structure
931 */
932 struct perf_cpu_context {
933 struct perf_event_context ctx;
934 struct perf_event_context *task_ctx;
935 int active_oncpu;
936 int exclusive;
937 struct list_head rotation_list;
938 int jiffies_interval;
939 struct pmu *active_pmu;
940 struct perf_cgroup *cgrp;
941 };
942
943 struct perf_output_handle {
944 struct perf_event *event;
945 struct ring_buffer *rb;
946 unsigned long wakeup;
947 unsigned long size;
948 void *addr;
949 int page;
950 };
951
952 #ifdef CONFIG_PERF_EVENTS
953
954 extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
955 extern void perf_pmu_unregister(struct pmu *pmu);
956
957 extern int perf_num_counters(void);
958 extern const char *perf_pmu_name(void);
959 extern void __perf_event_task_sched_in(struct task_struct *prev,
960 struct task_struct *task);
961 extern void __perf_event_task_sched_out(struct task_struct *prev,
962 struct task_struct *next);
963 extern int perf_event_init_task(struct task_struct *child);
964 extern void perf_event_exit_task(struct task_struct *child);
965 extern void perf_event_free_task(struct task_struct *task);
966 extern void perf_event_delayed_put(struct task_struct *task);
967 extern void perf_event_print_debug(void);
968 extern void perf_pmu_disable(struct pmu *pmu);
969 extern void perf_pmu_enable(struct pmu *pmu);
970 extern int perf_event_task_disable(void);
971 extern int perf_event_task_enable(void);
972 extern int perf_event_refresh(struct perf_event *event, int refresh);
973 extern void perf_event_update_userpage(struct perf_event *event);
974 extern int perf_event_release_kernel(struct perf_event *event);
975 extern struct perf_event *
976 perf_event_create_kernel_counter(struct perf_event_attr *attr,
977 int cpu,
978 struct task_struct *task,
979 perf_overflow_handler_t callback,
980 void *context);
981 extern u64 perf_event_read_value(struct perf_event *event,
982 u64 *enabled, u64 *running);
983
984 struct perf_sample_data {
985 u64 type;
986
987 u64 ip;
988 struct {
989 u32 pid;
990 u32 tid;
991 } tid_entry;
992 u64 time;
993 u64 addr;
994 u64 id;
995 u64 stream_id;
996 struct {
997 u32 cpu;
998 u32 reserved;
999 } cpu_entry;
1000 u64 period;
1001 struct perf_callchain_entry *callchain;
1002 struct perf_raw_record *raw;
1003 };
1004
1005 static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
1006 {
1007 data->addr = addr;
1008 data->raw = NULL;
1009 }
1010
1011 extern void perf_output_sample(struct perf_output_handle *handle,
1012 struct perf_event_header *header,
1013 struct perf_sample_data *data,
1014 struct perf_event *event);
1015 extern void perf_prepare_sample(struct perf_event_header *header,
1016 struct perf_sample_data *data,
1017 struct perf_event *event,
1018 struct pt_regs *regs);
1019
1020 extern int perf_event_overflow(struct perf_event *event,
1021 struct perf_sample_data *data,
1022 struct pt_regs *regs);
1023
1024 static inline bool is_sampling_event(struct perf_event *event)
1025 {
1026 return event->attr.sample_period != 0;
1027 }
1028
1029 /*
1030 * Return 1 for a software event, 0 for a hardware event
1031 */
1032 static inline int is_software_event(struct perf_event *event)
1033 {
1034 return event->pmu->task_ctx_nr == perf_sw_context;
1035 }
1036
1037 extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
1038
1039 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
1040
1041 #ifndef perf_arch_fetch_caller_regs
1042 static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
1043 #endif
1044
1045 /*
1046 * Take a snapshot of the regs. Skip ip and frame pointer to
1047 * the nth caller. We only need a few of the regs:
1048 * - ip for PERF_SAMPLE_IP
1049 * - cs for user_mode() tests
1050 * - bp for callchains
1051 * - eflags, for future purposes, just in case
1052 */
1053 static inline void perf_fetch_caller_regs(struct pt_regs *regs)
1054 {
1055 memset(regs, 0, sizeof(*regs));
1056
1057 perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
1058 }
1059
1060 static __always_inline void
1061 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
1062 {
1063 struct pt_regs hot_regs;
1064
1065 if (static_branch(&perf_swevent_enabled[event_id])) {
1066 if (!regs) {
1067 perf_fetch_caller_regs(&hot_regs);
1068 regs = &hot_regs;
1069 }
1070 __perf_sw_event(event_id, nr, regs, addr);
1071 }
1072 }
1073
1074 extern struct jump_label_key_deferred perf_sched_events;
1075
1076 static inline void perf_event_task_sched_in(struct task_struct *prev,
1077 struct task_struct *task)
1078 {
1079 if (static_branch(&perf_sched_events.key))
1080 __perf_event_task_sched_in(prev, task);
1081 }
1082
1083 static inline void perf_event_task_sched_out(struct task_struct *prev,
1084 struct task_struct *next)
1085 {
1086 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
1087
1088 if (static_branch(&perf_sched_events.key))
1089 __perf_event_task_sched_out(prev, next);
1090 }
1091
1092 extern void perf_event_mmap(struct vm_area_struct *vma);
1093 extern struct perf_guest_info_callbacks *perf_guest_cbs;
1094 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1095 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1096
1097 extern void perf_event_comm(struct task_struct *tsk);
1098 extern void perf_event_fork(struct task_struct *tsk);
1099
1100 /* Callchains */
1101 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
1102
1103 extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
1104 extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
1105
1106 static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
1107 {
1108 if (entry->nr < PERF_MAX_STACK_DEPTH)
1109 entry->ip[entry->nr++] = ip;
1110 }
1111
1112 extern int sysctl_perf_event_paranoid;
1113 extern int sysctl_perf_event_mlock;
1114 extern int sysctl_perf_event_sample_rate;
1115
1116 extern int perf_proc_update_handler(struct ctl_table *table, int write,
1117 void __user *buffer, size_t *lenp,
1118 loff_t *ppos);
1119
1120 static inline bool perf_paranoid_tracepoint_raw(void)
1121 {
1122 return sysctl_perf_event_paranoid > -1;
1123 }
1124
1125 static inline bool perf_paranoid_cpu(void)
1126 {
1127 return sysctl_perf_event_paranoid > 0;
1128 }
1129
1130 static inline bool perf_paranoid_kernel(void)
1131 {
1132 return sysctl_perf_event_paranoid > 1;
1133 }
1134
1135 extern void perf_event_init(void);
1136 extern void perf_tp_event(u64 addr, u64 count, void *record,
1137 int entry_size, struct pt_regs *regs,
1138 struct hlist_head *head, int rctx);
1139 extern void perf_bp_event(struct perf_event *event, void *data);
1140
1141 #ifndef perf_misc_flags
1142 # define perf_misc_flags(regs) \
1143 (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
1144 # define perf_instruction_pointer(regs) instruction_pointer(regs)
1145 #endif
1146
1147 extern int perf_output_begin(struct perf_output_handle *handle,
1148 struct perf_event *event, unsigned int size);
1149 extern void perf_output_end(struct perf_output_handle *handle);
1150 extern void perf_output_copy(struct perf_output_handle *handle,
1151 const void *buf, unsigned int len);
1152 extern int perf_swevent_get_recursion_context(void);
1153 extern void perf_swevent_put_recursion_context(int rctx);
1154 extern void perf_event_enable(struct perf_event *event);
1155 extern void perf_event_disable(struct perf_event *event);
1156 extern void perf_event_task_tick(void);
1157 #else
1158 static inline void
1159 perf_event_task_sched_in(struct task_struct *prev,
1160 struct task_struct *task) { }
1161 static inline void
1162 perf_event_task_sched_out(struct task_struct *prev,
1163 struct task_struct *next) { }
1164 static inline int perf_event_init_task(struct task_struct *child) { return 0; }
1165 static inline void perf_event_exit_task(struct task_struct *child) { }
1166 static inline void perf_event_free_task(struct task_struct *task) { }
1167 static inline void perf_event_delayed_put(struct task_struct *task) { }
1168 static inline void perf_event_print_debug(void) { }
1169 static inline int perf_event_task_disable(void) { return -EINVAL; }
1170 static inline int perf_event_task_enable(void) { return -EINVAL; }
1171 static inline int perf_event_refresh(struct perf_event *event, int refresh)
1172 {
1173 return -EINVAL;
1174 }
1175
1176 static inline void
1177 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
1178 static inline void
1179 perf_bp_event(struct perf_event *event, void *data) { }
1180
1181 static inline int perf_register_guest_info_callbacks
1182 (struct perf_guest_info_callbacks *callbacks) { return 0; }
1183 static inline int perf_unregister_guest_info_callbacks
1184 (struct perf_guest_info_callbacks *callbacks) { return 0; }
1185
1186 static inline void perf_event_mmap(struct vm_area_struct *vma) { }
1187 static inline void perf_event_comm(struct task_struct *tsk) { }
1188 static inline void perf_event_fork(struct task_struct *tsk) { }
1189 static inline void perf_event_init(void) { }
1190 static inline int perf_swevent_get_recursion_context(void) { return -1; }
1191 static inline void perf_swevent_put_recursion_context(int rctx) { }
1192 static inline void perf_event_enable(struct perf_event *event) { }
1193 static inline void perf_event_disable(struct perf_event *event) { }
1194 static inline void perf_event_task_tick(void) { }
1195 #endif
1196
1197 #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
1198
1199 /*
1200 * This has to have a higher priority than migration_notifier in sched.c.
1201 */
1202 #define perf_cpu_notifier(fn) \
1203 do { \
1204 static struct notifier_block fn##_nb __cpuinitdata = \
1205 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
1206 fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
1207 (void *)(unsigned long)smp_processor_id()); \
1208 fn(&fn##_nb, (unsigned long)CPU_STARTING, \
1209 (void *)(unsigned long)smp_processor_id()); \
1210 fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
1211 (void *)(unsigned long)smp_processor_id()); \
1212 register_cpu_notifier(&fn##_nb); \
1213 } while (0)
1214
1215 #endif /* __KERNEL__ */
1216 #endif /* _LINUX_PERF_EVENT_H */