/*
 *  Performance counters:
 *
 *   Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *   Copyright (C) 2008, Red Hat, Inc., Ingo Molnar
 *
 *  Data type definitions, declarations, prototypes.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>
/*
 * User-space ABI bits:
 */

enum perf_event_types {
	PERF_TYPE_HARDWARE	= 0,
	PERF_TYPE_SOFTWARE	= 1,
	PERF_TYPE_TRACEPOINT	= 2,

	/*
	 * available TYPE space, raw is the max value.
	 */
	PERF_TYPE_RAW		= 128,
};
/*
 * Generalized performance counter event types, used by the hw_event.event_id
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_ids {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CPU_CYCLES		= 0,
	PERF_COUNT_INSTRUCTIONS		= 1,
	PERF_COUNT_CACHE_REFERENCES	= 2,
	PERF_COUNT_CACHE_MISSES		= 3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_BRANCH_MISSES	= 5,
	PERF_COUNT_BUS_CYCLES		= 6,

	PERF_HW_EVENTS_MAX		= 7,
};
/*
 * Special "software" counters provided by the kernel, even if the hardware
 * does not support performance counters. These counters measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum sw_event_ids {
	PERF_COUNT_CPU_CLOCK		= 0,
	PERF_COUNT_TASK_CLOCK		= 1,
	PERF_COUNT_PAGE_FAULTS		= 2,
	PERF_COUNT_CONTEXT_SWITCHES	= 3,
	PERF_COUNT_CPU_MIGRATIONS	= 4,
	PERF_COUNT_PAGE_FAULTS_MIN	= 5,
	PERF_COUNT_PAGE_FAULTS_MAJ	= 6,

	PERF_SW_EVENTS_MAX		= 7,
};
#define __PERF_COUNTER_MASK(name)			\
	(((1ULL << PERF_COUNTER_##name##_BITS) - 1) <<	\
	 PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW_BITS		1
#define PERF_COUNTER_RAW_SHIFT		63
#define PERF_COUNTER_RAW_MASK		__PERF_COUNTER_MASK(RAW)

#define PERF_COUNTER_CONFIG_BITS	63
#define PERF_COUNTER_CONFIG_SHIFT	0
#define PERF_COUNTER_CONFIG_MASK	__PERF_COUNTER_MASK(CONFIG)

#define PERF_COUNTER_TYPE_BITS		7
#define PERF_COUNTER_TYPE_SHIFT		56
#define PERF_COUNTER_TYPE_MASK		__PERF_COUNTER_MASK(TYPE)

#define PERF_COUNTER_EVENT_BITS		56
#define PERF_COUNTER_EVENT_SHIFT	0
#define PERF_COUNTER_EVENT_MASK		__PERF_COUNTER_MASK(EVENT)
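
/*
 * Illustration (a sketch, not part of the ABI itself): a non-raw config
 * word packs a 7-bit type into bits 56-62 and a 56-bit event id into
 * bits 0-55; the raw bit is bit 63.  For example:
 *
 *	__u64 config = ((__u64)PERF_TYPE_HARDWARE << PERF_COUNTER_TYPE_SHIFT)
 *		     | (PERF_COUNT_CPU_CYCLES & PERF_COUNTER_EVENT_MASK);
 *
 * A cpu-specific (raw) configuration instead sets the MSB and uses the
 * remaining 63 bits directly (raw_pmu_bits is hypothetical here):
 *
 *	__u64 config = PERF_COUNTER_RAW_MASK | raw_pmu_bits;
 */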
/*
 * Bits that can be set in hw_event.record_type to request information
 * in the overflow packets.
 */
enum perf_counter_record_format {
	PERF_RECORD_IP		= 1U << 0,
	PERF_RECORD_TID		= 1U << 1,
	PERF_RECORD_GROUP	= 1U << 2,
	PERF_RECORD_CALLCHAIN	= 1U << 3,
	PERF_RECORD_TIME	= 1U << 4,
};
/*
 * Bits that can be set in hw_event.read_format to request that
 * reads on the counter should return the indicated quantities,
 * in increasing order of bit value, after the counter value.
 */
enum perf_counter_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1,
	PERF_FORMAT_TOTAL_TIME_RUNNING	= 2,
};
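
/*
 * Illustration: with both read_format bits set, a read() on the counter
 * fd returns the counter value followed by the two time quantities, in
 * increasing bit order.  A user-space sketch under that assumption:
 *
 *	struct {
 *		__u64	value;
 *		__u64	time_enabled;
 *		__u64	time_running;
 *	} rd;
 *
 *	read(counter_fd, &rd, sizeof(rd));
 *
 * time_enabled/time_running can then be used to scale the value when
 * counters were multiplexed onto limited hardware.
 */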
/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
	/*
	 * The MSB of the config word signifies if the rest contains cpu
	 * specific (raw) counter configuration data; if unset, the next
	 * 7 bits are an event type and the rest of the bits are the event
	 * identifier.
	 */
	__u64		config;

	__u32		record_type;
	__u32		read_format;

	__u64		disabled       :  1, /* off by default        */
			nmi	       :  1, /* NMI sampling          */
			inherit	       :  1, /* children inherit it   */
			pinned	       :  1, /* must always be on PMU */
			exclusive      :  1, /* only group on PMU     */
			exclude_user   :  1, /* don't count user      */
			exclude_kernel :  1, /* ditto kernel          */
			exclude_hv     :  1, /* ditto hypervisor      */
			exclude_idle   :  1, /* don't count when idle */
			mmap	       :  1, /* include mmap data     */
			munmap	       :  1, /* include munmap data   */

			__reserved_1   : 53;

	__u32		extra_config_len;
	__u32		wakeup_events;	/* wakeup every n events */
};
/*
 * Ioctls that can be done on a perf counter fd:
 */
#define PERF_COUNTER_IOC_ENABLE		_IO ('$', 0)
#define PERF_COUNTER_IOC_DISABLE	_IO ('$', 1)
#define PERF_COUNTER_IOC_REFRESH	_IOW('$', 2, u32)
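
/*
 * Illustration: a hedged user-space sketch of driving a counter fd
 * (assumed to come from sys_perf_counter_open(), whose exact signature
 * is not part of this header):
 *
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE);
 *	... set up the measured region ...
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE);
 *	run_workload();
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE);
 *
 *	__u64 count;
 *	read(fd, &count, sizeof(count));
 */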
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_counter_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw counters in user-space.
	 *
	 * The lock field is a seqlock generation count; retry the read
	 * when it changes underneath you:
	 *
	 *	do {
	 *		seq = pc->lock;
	 *		barrier();
	 *
	 *		count = pmc_read(pc->index - 1);
	 *		count += pc->offset;
	 *
	 *		barrier();
	 *	} while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *	 processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware counter identifier */
	__s64	offset;			/* add to hardware counter value */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space should issue an rmb(), on SMP capable platforms,
	 * after reading this value -- see perf_counter_wakeup().
	 */
	__u32	data_head;		/* head in the data section */
};
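
/*
 * Illustration: a hedged sketch of mapping the control page plus data
 * buffer and consuming data_head (the one-control-page-then-data layout
 * is an assumption of this sketch):
 *
 *	struct perf_counter_mmap_page *pc;
 *	void *data;
 *	__u32 head;
 *
 *	pc   = mmap(NULL, (1 + n) * page_size, PROT_READ, MAP_SHARED, fd, 0);
 *	data = (void *)pc + page_size;
 *
 *	head = pc->data_head;
 *	rmb();			-- order the data reads after the head read
 *	... parse records in data[] up to head ...
 */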
#define PERF_EVENT_MISC_KERNEL		(1 << 0)
#define PERF_EVENT_MISC_USER		(1 << 1)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};
enum perf_event_type {

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_EVENT_MMAP			= 1,
	PERF_EVENT_MUNMAP		= 2,

	/*
	 * Half the event type space is reserved for the counter overflow
	 * bitfields, as found in hw_event.record_type.
	 *
	 * These events will have types of the form:
	 *	PERF_EVENT_COUNTER_OVERFLOW { | __PERF_EVENT_* } *
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && __PERF_EVENT_IP
	 *	{ u32			pid, tid; } && __PERF_EVENT_TID
	 *
	 *	{ u64			nr;
	 *	  { u64 event, val; }	cnt[nr];  } && __PERF_EVENT_GROUP
	 *
	 *	{ u16			nr,
	 *				hv,
	 *				kernel,
	 *				user;
	 *	  u64			ips[nr];  } && __PERF_EVENT_CALLCHAIN
	 *
	 *	{ u64			time;	  } && __PERF_EVENT_TIME
	 * };
	 */
	PERF_EVENT_COUNTER_OVERFLOW	= 1UL << 31,
	__PERF_EVENT_IP			= PERF_RECORD_IP,
	__PERF_EVENT_TID		= PERF_RECORD_TID,
	__PERF_EVENT_GROUP		= PERF_RECORD_GROUP,
	__PERF_EVENT_CALLCHAIN		= PERF_RECORD_CALLCHAIN,
	__PERF_EVENT_TIME		= PERF_RECORD_TIME,
};
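
/*
 * Illustration: records in the mmap() data buffer are self-describing
 * via perf_event_header.size, so a consumer can walk them without
 * knowing every type.  A hedged user-space sketch:
 *
 *	__u64 offset = 0;
 *
 *	while (offset < head) {
 *		struct perf_event_header *hdr = data + offset;
 *
 *		if (hdr->type == PERF_EVENT_MMAP)
 *			... correlate userspace IPs to code ...
 *		else if (hdr->type & PERF_EVENT_COUNTER_OVERFLOW)
 *			... decode fields per the __PERF_EVENT_* bits ...
 *
 *		offset += hdr->size;
 *	}
 */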
#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <asm/atomic.h>

struct task_struct;
static inline u64 perf_event_raw(struct perf_counter_hw_event *hw_event)
{
	return hw_event->config & PERF_COUNTER_RAW_MASK;
}

static inline u64 perf_event_config(struct perf_counter_hw_event *hw_event)
{
	return hw_event->config & PERF_COUNTER_CONFIG_MASK;
}

static inline u64 perf_event_type(struct perf_counter_hw_event *hw_event)
{
	return (hw_event->config & PERF_COUNTER_TYPE_MASK) >>
		PERF_COUNTER_TYPE_SHIFT;
}

static inline u64 perf_event_id(struct perf_counter_hw_event *hw_event)
{
	return hw_event->config & PERF_COUNTER_EVENT_MASK;
}
/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	union {
		struct { /* hardware */
			unsigned long	config_base;
			unsigned long	counter_base;
		};
		union { /* software */
			struct hrtimer	hrtimer;
		};
	};
	atomic64_t		prev_count;
	atomic64_t		period_left;
#endif
};
struct perf_counter;

/**
 * struct hw_perf_counter_ops - performance counter hw ops
 */
struct hw_perf_counter_ops {
	int	(*enable)	(struct perf_counter *counter);
	void	(*disable)	(struct perf_counter *counter);
	void	(*read)		(struct perf_counter *counter);
};
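
/*
 * Illustration: a PMU backend (or a software counter implementation)
 * plugs into the core via such an ops table, returned from
 * hw_perf_counter_init().  A hypothetical sketch -- the my_pmu_*
 * functions are not part of this header:
 *
 *	static const struct hw_perf_counter_ops my_pmu_ops = {
 *		.enable		= my_pmu_enable,
 *		.disable	= my_pmu_disable,
 *		.read		= my_pmu_read,
 *	};
 */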
/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_ERROR	= -2,
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};
struct file;

struct perf_mmap_data {
	struct rcu_head			rcu_head;
	struct perf_counter_mmap_page	*user_page;
};
struct perf_pending_entry {
	struct perf_pending_entry	*next;
	void (*func)(struct perf_pending_entry *);
};
/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head		list_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	struct perf_counter		*group_leader;
	const struct hw_perf_counter_ops *hw_ops;

	enum perf_counter_active_state	state;
	enum perf_counter_active_state	prev_state;

	/*
	 * These are the total time in nanoseconds that the counter
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task counter)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the counter is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the counter was enabled
	 * tstamp_running: the notional time when the counter was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	counter was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	struct perf_counter_hw_event	hw_event;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct task_struct		*task;

	struct perf_counter		*parent;
	struct list_head		child_list;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * counters have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			mutex;

	struct mutex			mmap_mutex;
	struct perf_mmap_data		*data;

	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	struct perf_pending_entry	pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_counter *);
	struct rcu_head			rcu_head;
#endif
};
/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the states of the counters in the list,
	 * nr_active, and the list:
	 */
	spinlock_t		lock;
	/*
	 * Protect the list of counters.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex		mutex;

	struct list_head	counter_list;
	struct list_head	event_list;
	struct task_struct	*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64			time;
#endif
};
/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;

	/*
	 * Recursion avoidance:
	 *
	 * task, softirq, irq, nmi context
	 */
	int				recursion[4];
};

/*
 * Set by architecture code:
 */
extern int perf_max_counters;
#ifdef CONFIG_PERF_COUNTERS
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_do_pending(void);
extern void perf_counter_print_debug(void);
extern void perf_counter_unthrottle(void);
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
				  struct perf_cpu_context *cpuctx,
				  struct perf_counter_context *ctx, int cpu);
extern void perf_counter_update_userpage(struct perf_counter *counter);

extern int perf_counter_overflow(struct perf_counter *counter,
				 int nmi, struct pt_regs *regs);
/*
 * Return 1 for a software counter, 0 for a hardware counter
 */
static inline int is_software_counter(struct perf_counter *counter)
{
	return !perf_event_raw(&counter->hw_event) &&
		perf_event_type(&counter->hw_event) != PERF_TYPE_HARDWARE;
}
extern void perf_swcounter_event(u32, u64, int, struct pt_regs *);
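
/*
 * Illustration: kernel code reports a software event via
 * perf_swcounter_event(event, nr, nmi, regs) -- argument names per the
 * !CONFIG_PERF_COUNTERS stub further down.  A hedged sketch of how a
 * page-fault path might feed the PERF_COUNT_PAGE_FAULTS counter:
 *
 *	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs);
 */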
extern void perf_counter_mmap(unsigned long addr, unsigned long len,
			      unsigned long pgoff, struct file *file);

extern void perf_counter_munmap(unsigned long addr, unsigned long len,
				unsigned long pgoff, struct file *file);
#define MAX_STACK_DEPTH		255

struct perf_callchain_entry {
	u16	nr, hv, kernel, user;
	u64	ip[MAX_STACK_DEPTH];
};

extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline void perf_counter_init_task(struct task_struct *child)	{ }
static inline void perf_counter_exit_task(struct task_struct *child)	{ }
static inline void perf_counter_do_pending(void)			{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void perf_counter_unthrottle(void)			{ }
static inline void hw_perf_restore(u64 ctrl)				{ }
static inline u64 hw_perf_save_disable(void)		      { return 0; }
static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
static inline int perf_counter_task_enable(void)	{ return -EINVAL; }

static inline void
perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs)	{ }

static inline void
perf_counter_mmap(unsigned long addr, unsigned long len,
		  unsigned long pgoff, struct file *file)		{ }

static inline void
perf_counter_munmap(unsigned long addr, unsigned long len,
		    unsigned long pgoff, struct file *file)		{ }

#endif
#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_COUNTER_H */