/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif
struct perf_guest_info_callbacks {
	int		(*is_in_guest)(void);
	int		(*is_user_mode)(void);
	unsigned long	(*get_guest_ip)(void);
};

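/*
 * Example (an illustrative sketch, not taken from this file): a hypervisor
 * such as KVM fills in these hooks and registers them so the PMI handler
 * can attribute samples to guest context.  The callback names below are
 * hypothetical; the register/unregister functions are declared later in
 * this header.
 *
 *	static struct perf_guest_info_callbacks my_guest_cbs = {
 *		.is_in_guest	= my_is_in_guest,
 *		.is_user_mode	= my_is_guest_user_mode,
 *		.get_guest_ip	= my_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&my_guest_cbs);
 *	...
 *	perf_unregister_guest_info_callbacks(&my_guest_cbs);
 */
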
#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * Branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

struct event_constraint;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;

			struct event_constraint *constraint;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* intel_cqm */
			int			cqm_state;
			int			cqm_rmid;
			struct list_head	cqm_events_entry;
			struct list_head	cqm_groups_entry;
			struct list_head	cqm_group_entry;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
	};
	struct task_struct		*target;
	int				state;
	local64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	local64_t			period_left;
	u64				interrupts_seq;
	u64				interrupts;

	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT	0x01
#define PERF_PMU_CAP_NO_NMI		0x02
#define PERF_PMU_CAP_AUX_NO_SG		0x04

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/*
	 * Fully disable/enable this PMU; can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)		(struct perf_event *event);

	/*
	 * Notification that the event was mapped or unmapped.  Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped)		(struct perf_event *event); /* optional */
	void (*event_unmapped)		(struct perf_event *event); /* optional */

#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU; can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU.  The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0.  ->start() will be used to continue.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction: add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction; after this, ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)		(struct pmu *pmu); /* optional */
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one.  On success
	 * the transaction is closed.  On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)		(struct pmu *pmu); /* optional */
	/*
	 * Will cancel the transaction; assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)		(struct pmu *pmu); /* optional */

	/*
	 * Will return the value for perf_event_mmap_page::index for this
	 * event; if no implementation is provided it defaults to:
	 * event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * context-switches callback
	 */
	void (*sched_task)		(struct perf_event_context *ctx,
					 bool sched_in);
	/*
	 * PMU specific data size
	 */
	size_t				task_ctx_size;

	/*
	 * Return the count value for a counter.
	 */
	u64 (*count)			(struct perf_event *event); /* optional */

	/*
	 * Set up pmu-private data structures for an AUX area
	 */
	void *(*setup_aux)		(int cpu, void **pages,
					 int nr_pages, bool overwrite);
					/* optional */

	/*
	 * Free pmu-private AUX data structures
	 */
	void (*free_aux)		(void *aux); /* optional */
};

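/*
 * Group-scheduling transaction flow -- an illustrative sketch (assumption:
 * condensed from the generic group-scheduling logic; error handling and the
 * sibling iteration are simplified):
 *
 *	pmu->start_txn(pmu);
 *
 *	if (pmu->add(leader, PERF_EF_START))
 *		goto fail;
 *	list_for_each_entry(event, &leader->sibling_list, group_entry)
 *		if (pmu->add(event, PERF_EF_START))
 *			goto fail;
 *
 *	if (!pmu->commit_txn(pmu))
 *		return 0;		(the whole group is now scheduled)
 * fail:
 *	... pmu->del() each event that was successfully added ...
 *	pmu->cancel_txn(pmu);
 */
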
/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE		= 0x1,
};

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08

struct perf_cgroup;
struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * XXX: group_entry and sibling_list should be mutually exclusive;
	 * either you're a sibling on a group, or you're the group leader.
	 * Rework the code to always use the same list element.
	 *
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		group_entry;
	struct list_head		sibling_list;

	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group intact, which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context.  It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp.  Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct ring_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	u64				(*clock)(void);
	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* the cgroup the event is attached to */
	int				cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	int				nr_cgroups;	/* cgroup evts */
	void				*task_ctx_data;	/* pmu specific data */
	struct rcu_head			rcu_head;

	struct delayed_work		orphans_remove;
	bool				orphans_remove_sched;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	struct pmu			*unique_pmu;
	struct perf_cgroup		*cgrp;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	void				*addr;
	int				page;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info __percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function.  In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
	return container_of(task_css(task, perf_event_cgrp_id),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				     int src_cpu, int dst_cpu);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);

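/*
 * Example: counting CPU cycles from kernel code (an illustrative sketch;
 * error handling is abbreviated and the surrounding code is hypothetical):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *	};
 *	struct perf_event *event;
 *	u64 enabled, running;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 NULL, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	...
 *	perf_event_read_value(event, &enabled, &running);
 *	perf_event_release_kernel(event);
 */
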
struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), grouped so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	u64				weight;
	u64				txn;
	union perf_mem_data_src		data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	struct perf_callchain_entry	*callchain;

	/*
	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
	 * on arch details.
	 */
	struct perf_regs		regs_user;
	struct pt_regs			regs_user_copy;

	struct perf_regs		regs_intr;
	u64				stack_user_size;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		     PERF_MEM_S(LVL, NA)   |\
		     PERF_MEM_S(SNOOP, NA) |\
		     PERF_MEM_S(LOCK, NA)  |\
		     PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}

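/*
 * Typical use in a PMU driver's interrupt handler (sketch; the stop step
 * refers to pmu::stop() and the surrounding driver code is hypothetical):
 *
 *	struct perf_sample_data data;
 *
 *	perf_sample_data_init(&data, 0, event->hw.last_period);
 *	if (perf_event_overflow(event, &data, regs))
 *		my_pmu_stop(event, 0);	(non-zero return: stop the event)
 */
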
extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs.  Skip ip and frame pointer to
 * the nth caller.  We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}

DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler: it hard-assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id])) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(event_id, nr, regs, addr);
	}
}

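/*
 * Example: the architecture fault handlers count software events this way
 * (the PERF_COUNT_SW_PAGE_FAULTS event id comes from
 * uapi/linux/perf_event.h):
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * The static_key check compiles to a patched branch, so the common case
 * (no software events enabled) costs almost nothing.
 */
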
extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_out(prev, next);
}

static inline u64 __perf_event_count(struct perf_event *event)
{
	return local64_read(&event->count) + atomic64_read(&event->child_count);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}

static inline bool has_aux(struct perf_event *event)
{
	return event->pmu->setup_aux;
}

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
				     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
890 | extern int perf_swevent_get_recursion_context(void); |
891 | extern void perf_swevent_put_recursion_context(int rctx); | |
ab573844 | 892 | extern u64 perf_swevent_set_period(struct perf_event *event); |
44234adc FW |
893 | extern void perf_event_enable(struct perf_event *event); |
894 | extern void perf_event_disable(struct perf_event *event); | |
500ad2d8 | 895 | extern int __perf_event_disable(void *info); |
e9d2b064 | 896 | extern void perf_event_task_tick(void); |
e041e328 | 897 | #else /* !CONFIG_PERF_EVENTS: */ |
0793a61d | 898 | static inline void |
ab0cce56 JO |
899 | perf_event_task_sched_in(struct task_struct *prev, |
900 | struct task_struct *task) { } | |
901 | static inline void | |
902 | perf_event_task_sched_out(struct task_struct *prev, | |
903 | struct task_struct *next) { } | |
cdd6c482 IM |
904 | static inline int perf_event_init_task(struct task_struct *child) { return 0; } |
905 | static inline void perf_event_exit_task(struct task_struct *child) { } | |
906 | static inline void perf_event_free_task(struct task_struct *task) { } | |
4e231c79 | 907 | static inline void perf_event_delayed_put(struct task_struct *task) { } |
57c0c15b | 908 | static inline void perf_event_print_debug(void) { } |
57c0c15b IM |
909 | static inline int perf_event_task_disable(void) { return -EINVAL; } |
910 | static inline int perf_event_task_enable(void) { return -EINVAL; } | |
26ca5c11 AK |
911 | static inline int perf_event_refresh(struct perf_event *event, int refresh) |
912 | { | |
913 | return -EINVAL; | |
914 | } | |
15dbf27c | 915 | |
925d519a | 916 | static inline void |
a8b0ca17 | 917 | perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } |
24f1e32c | 918 | static inline void |
86038c5e PZI |
919 | perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { } |
920 | static inline void | |
184f412c | 921 | perf_bp_event(struct perf_event *event, void *data) { } |
0a4a9391 | 922 | |
39447b38 | 923 | static inline int perf_register_guest_info_callbacks |
e7e7ee2e | 924 | (struct perf_guest_info_callbacks *callbacks) { return 0; } |
39447b38 | 925 | static inline int perf_unregister_guest_info_callbacks |
e7e7ee2e | 926 | (struct perf_guest_info_callbacks *callbacks) { return 0; } |
39447b38 | 927 | |
57c0c15b | 928 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } |
e041e328 | 929 | static inline void perf_event_exec(void) { } |
82b89778 | 930 | static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } |
cdd6c482 IM |
931 | static inline void perf_event_fork(struct task_struct *tsk) { } |
932 | static inline void perf_event_init(void) { } | |
184f412c | 933 | static inline int perf_swevent_get_recursion_context(void) { return -1; } |
4ed7c92d | 934 | static inline void perf_swevent_put_recursion_context(int rctx) { } |
ab573844 | 935 | static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; } |
44234adc FW |
936 | static inline void perf_event_enable(struct perf_event *event) { } |
937 | static inline void perf_event_disable(struct perf_event *event) { } | |
500ad2d8 | 938 | static inline int __perf_event_disable(void *info) { return -1; } |
e9d2b064 | 939 | static inline void perf_event_task_tick(void) { } |
0793a61d TG |
940 | #endif |
941 | ||
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
extern bool perf_event_can_stop_tick(void);
#else
static inline bool perf_event_can_stop_tick(void)			{ return true; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)			{ }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched/core.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	unsigned long cpu = smp_processor_id();				\
	unsigned long flags;						\
									\
	cpu_notifier_register_begin();					\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)cpu);				\
	local_irq_save(flags);						\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)cpu);				\
	local_irq_restore(flags);					\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)cpu);				\
	__register_cpu_notifier(&fn##_nb);				\
	cpu_notifier_register_done();					\
} while (0)

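/*
 * Example usage (sketch; the notifier function below is hypothetical):
 *
 *	static int my_pmu_cpu_notifier(struct notifier_block *self,
 *				       unsigned long action, void *hcpu)
 *	{
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			... per-cpu setup ...
 *			break;
 *		default:
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	perf_cpu_notifier(my_pmu_cpu_notifier);
 */
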
/*
 * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
 * callback for already online CPUs.
 */
#define __perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
									\
	__register_cpu_notifier(&fn##_nb);				\
} while (0)

struct perf_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
	const char *event_str;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   = _id,							\
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)			\
static struct perf_pmu_events_attr _var = {				\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
	.id		= 0,						\
	.event_str	= _str,						\
};

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
	     struct device_attribute *attr,				\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)

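/*
 * Example: how a PMU driver typically uses these macros to expose its sysfs
 * format attributes (sketch; the bit range and array name are illustrative):
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *
 *	static struct attribute *my_pmu_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 */
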
#endif /* _LINUX_PERF_EVENT_H */