/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
};
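
/*
 * Illustrative sketch (an assumption, not part of the original header): a
 * hypervisor module would typically fill in this callback structure and
 * register it so that PMI handling can attribute samples to guest context.
 * The callback names below are hypothetical.
 *
 *	static struct perf_guest_info_callbacks my_guest_cbs = {
 *		.is_in_guest	= my_is_in_guest,
 *		.is_user_mode	= my_is_guest_user_mode,
 *		.get_guest_ip	= my_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&my_guest_cbs);
 *	...
 *	perf_unregister_guest_info_callbacks(&my_guest_cbs);
 */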

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 *
 * Branches (to, from) are stored from most recent to least recent, i.e.,
 * entries[0] contains the most recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};
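
/*
 * Illustrative sketch (an assumption, not from the original header): a PMU
 * driver that captured a branch stack hands it to the sampling code roughly
 * like this; "cpuc" is a hypothetical per-cpu driver structure.
 *
 *	struct perf_branch_stack *br = &cpuc->lbr_stack;
 *	u64 from, to;
 *	int i;
 *
 *	for (i = 0; i < br->nr; i++) {
 *		from = br->entries[i].from;	(most recent branch first)
 *		to   = br->entries[i].to;
 *	}
 *
 *	data.br_stack = br;			(consumed by perf_output_sample())
 */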

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* intel_cqm */
			int			cqm_state;
			u32			cqm_rmid;
			struct list_head	cqm_events_entry;
			struct list_head	cqm_groups_entry;
			struct list_head	cqm_group_entry;
		};
		struct { /* itrace */
			int			itrace_started;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
	};
	struct task_struct		*target;
	int				state;
	local64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	local64_t			period_left;
	u64				interrupts_seq;
	u64				interrupts;

	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT	0x01
#define PERF_PMU_CAP_NO_NMI		0x02
#define PERF_PMU_CAP_AUX_NO_SG		0x04
#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF	0x08
#define PERF_PMU_CAP_EXCLUSIVE		0x10
#define PERF_PMU_CAP_ITRACE		0x20

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try to initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)		(struct perf_event *event);

	/*
	 * Notification that the event was mapped or unmapped.  Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped)		(struct perf_event *event); /* optional */
	void (*event_unmapped)		(struct perf_event *event); /* optional */

#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0. ->start() will be used to continue.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group event scheduling is treated as a transaction: add group
	 * events as a whole and perform one schedulability test. If the
	 * test fails, roll back the whole group.
	 *
	 * Start the transaction; after this, ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)		(struct pmu *pmu); /* optional */
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)		(struct pmu *pmu); /* optional */
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)		(struct pmu *pmu); /* optional */
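
	/*
	 * Illustrative sketch (an assumption, not core code): group
	 * scheduling in the generic layer uses the transaction hooks
	 * roughly as follows.
	 *
	 *	pmu->start_txn(pmu);
	 *
	 *	for_each_event_in_group(event) {
	 *		if (pmu->add(event, PERF_EF_START))
	 *			goto roll_back;
	 *	}
	 *
	 *	if (!pmu->commit_txn(pmu))
	 *		return 0;	(the group is now scheduled)
	 *
	 * roll_back:
	 *	for_each_added_event(event)
	 *		pmu->del(event, 0);
	 *	pmu->cancel_txn(pmu);
	 *
	 * "for_each_event_in_group" and "for_each_added_event" are
	 * pseudo-iterators standing in for the sibling_list walk.
	 */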

	/*
	 * Will return the value for perf_event_mmap_page::index for this
	 * event; if no implementation is provided it will default to
	 * event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * context-switches callback
	 */
	void (*sched_task)		(struct perf_event_context *ctx,
					 bool sched_in);
	/*
	 * PMU specific data size
	 */
	size_t				task_ctx_size;


	/*
	 * Return the count value for a counter.
	 */
	u64 (*count)			(struct perf_event *event); /* optional */

	/*
	 * Set up pmu-private data structures for an AUX area
	 */
	void *(*setup_aux)		(int cpu, void **pages,
					 int nr_pages, bool overwrite);
					/* optional */

	/*
	 * Free pmu-private AUX data structures
	 */
	void (*free_aux)		(void *aux); /* optional */

	/*
	 * Filter events for PMU-specific reasons.
	 */
	int (*filter_match)		(struct perf_event *event); /* optional */
};

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE		= 0x1,
};

#define SWEVENT_HLIST_BITS	8
#define SWEVENT_HLIST_SIZE	(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08

struct perf_cgroup;
struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * XXX: group_entry and sibling_list should be mutually exclusive;
	 * either you're a sibling in a group, or you're the group leader.
	 * Rework the code to always use the same list element.
	 *
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		group_entry;
	struct list_head		sibling_list;

	/*
	 * We need storage to track the entries in perf_pmu_migrate_context;
	 * we cannot use the event_entry because of RCU, and we want to keep
	 * the group intact, which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct ring_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	u64				(*clock)(void);
	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct trace_event_call		*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup the event is attached to */
	int				cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	int				nr_cgroups;	/* cgroup evts */
	void				*task_ctx_data; /* pmu specific data */
	struct rcu_head			rcu_head;

	struct delayed_work		orphans_remove;
	bool				orphans_remove_sched;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per-CPU event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;

	raw_spinlock_t			hrtimer_lock;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	unsigned int			hrtimer_active;

	struct pmu			*unique_pmu;
	struct perf_cgroup		*cgrp;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	union {
		void			*addr;
		unsigned long		head;
	};
	int				page;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
	return container_of(task_css(task, perf_event_cgrp_id),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
				   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
				unsigned long size, bool truncated);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
				unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct perf_event *perf_event_get(unsigned int fd);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
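
/*
 * Illustrative sketch (an assumption, not from the original header): an
 * in-kernel user such as a watchdog might create and enable a counter
 * roughly as follows; "my_overflow" is a hypothetical callback with the
 * perf_overflow_handler_t signature.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.pinned		= 1,
 *		.sample_period	= 1000000,
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	...
 *	perf_event_release_kernel(event);
 */
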
extern void perf_pmu_migrate_context(struct pmu *pmu,
				     int src_cpu, int dst_cpu);
extern u64 perf_event_read_local(struct perf_event *event);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);


struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), grouped so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	u64				weight;
	u64				txn;
	union perf_mem_data_src		data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	struct perf_callchain_entry	*callchain;

	/*
	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
	 * on arch details.
	 */
	struct perf_regs		regs_user;
	struct pt_regs			regs_user_copy;

	struct perf_regs		regs_intr;
	u64				stack_user_size;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		     PERF_MEM_S(LVL, NA)  |\
		     PERF_MEM_S(SNOOP, NA)|\
		     PERF_MEM_S(LOCK, NA) |\
		     PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}
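
/*
 * Illustrative sketch (an assumption, not from the original header): a PMU
 * interrupt handler typically initializes the sample data and lets the core
 * decide whether the event should be stopped; "my_pmu_stop" is a
 * hypothetical driver-local stop routine.
 *
 *	struct perf_sample_data data;
 *
 *	perf_sample_data_init(&data, 0, event->hw.last_period);
 *
 *	if (perf_event_overflow(event, &data, regs))
 *		my_pmu_stop(event, 0);
 */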

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

extern void perf_event_output(struct perf_event *event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}
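
/*
 * Illustrative sketch (an assumption, not from the original header): callers
 * emit software events from the place where the event logically occurs,
 * e.g. the page fault path does something along the lines of:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * The static key keeps this a NOP until at least one such event exists.
 */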

DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler; it hard-assumes no recursion,
 * which is guaranteed because we never actually schedule inside other
 * swevents, since those disable preemption.
 */
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id])) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(event_id, nr, regs, addr);
	}
}

extern struct static_key_deferred perf_sched_events;

static __always_inline bool
perf_sw_migrate_enabled(void)
{
	if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
		return true;
	return false;
}

static inline void perf_event_task_migrate(struct task_struct *task)
{
	if (perf_sw_migrate_enabled())
		task->sched_migrated = 1;
}

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_in(prev, task);

	if (perf_sw_migrate_enabled() && task->sched_migrated) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
		task->sched_migrated = 0;
	}
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_out(prev, next);
}

static inline u64 __perf_event_count(struct perf_event *event)
{
	return local64_read(&event->count) + atomic64_read(&event->child_count);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}
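
/*
 * Illustrative sketch (an assumption, not from the original header): an
 * architecture's kernel callchain walker feeds return addresses into the
 * entry buffer via perf_callchain_store(); "next_frame" is a hypothetical
 * unwinder step and "regs->ip" is the x86 field name, used only as an
 * example.
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry *entry,
 *				   struct pt_regs *regs)
 *	{
 *		unsigned long addr = regs->ip;
 *
 *		perf_callchain_store(entry, addr);
 *		while (next_frame(&addr))
 *			perf_callchain_store(entry, addr);
 *	}
 */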

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);


static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}

static inline bool has_aux(struct perf_event *event)
{
	return event->pmu->setup_aux;
}

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
				     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event)				{ return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
		    bool truncated)					{ }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
		     unsigned long size)				{ return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle)				{ return NULL; }
static inline void
perf_event_task_migrate(struct task_struct *task)			{ }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline struct perf_event *perf_event_get(unsigned int fd)	{ return ERR_PTR(-EINVAL); }
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	return ERR_PTR(-EINVAL);
}
static inline u64 perf_event_read_local(struct perf_event *event)	{ return -EINVAL; }
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_exec(void)				{ }
static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline int __perf_event_disable(void *info)			{ return -1; }
static inline void perf_event_task_tick(void)				{ }
static inline int perf_event_release_kernel(struct perf_event *event)	{ return 0; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
extern bool perf_event_can_stop_tick(void);
#else
static inline bool perf_event_can_stop_tick(void)			{ return true; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)			{ }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched/core.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	unsigned long cpu = smp_processor_id();				\
	unsigned long flags;						\
									\
	cpu_notifier_register_begin();					\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)cpu);				\
	local_irq_save(flags);						\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)cpu);				\
	local_irq_restore(flags);					\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)cpu);				\
	__register_cpu_notifier(&fn##_nb);				\
	cpu_notifier_register_done();					\
} while (0)

/*
 * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
 * callback for already online CPUs.
 */
#define __perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
									\
	__register_cpu_notifier(&fn##_nb);				\
} while (0)
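
/*
 * Illustrative sketch (an assumption, not from the original header): a PMU
 * driver registers its hotplug callback with the macro above, e.g.
 *
 *	static int my_pmu_cpu_notifier(struct notifier_block *self,
 *				       unsigned long action, void *hcpu)
 *	{
 *		...
 *		return NOTIFY_OK;
 *	}
 *
 *	perf_cpu_notifier(my_pmu_cpu_notifier);
 *
 * "my_pmu_cpu_notifier" is a hypothetical name; as the macro body shows,
 * perf_cpu_notifier() also replays CPU_UP_PREPARE/CPU_STARTING/CPU_ONLINE
 * for the CPU it runs on before registering the notifier.
 */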

struct perf_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
	const char *event_str;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   = _id,							\
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)			\
static struct perf_pmu_events_attr _var = {				\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= _str,						\
};
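
/*
 * Illustrative sketch (an assumption, not from the original header): drivers
 * typically expose named events through these helpers and hang the resulting
 * attribute off an attribute array, e.g.
 *
 *	PMU_EVENT_ATTR_STRING(cpu-cycles, my_attr_cpu_cycles, "event=0x3c");
 *
 *	static struct attribute *my_pmu_events_attrs[] = {
 *		&my_attr_cpu_cycles.attr.attr,
 *		NULL,
 *	};
 *
 * "my_attr_cpu_cycles", "my_pmu_events_attrs" and the encoding string are
 * hypothetical.
 */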

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
	     struct device_attribute *attr,				\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
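
/*
 * Illustrative sketch (an assumption, not from the original header): the
 * format directory of a PMU is usually built from these attributes, e.g.
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *
 *	static struct attribute *my_pmu_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 *
 * The bit-range string and the "my_pmu_format_attrs" array name are
 * hypothetical; format_attr_event is the name PMU_FORMAT_ATTR() generates.
 */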

#endif /* _LINUX_PERF_EVENT_H */