perf/x86/intel/pt: Fix KVM warning due to doing rdmsr() before the CPUID test
arch/x86/kernel/cpu/perf_event_intel_pt.c
/*
 * Intel(R) Processor Trace PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Intel PT is specified in the Intel Architecture Instruction Set Extensions
 * Programming Reference:
 * http://software.intel.com/en-us/intel-isa-extensions
 */

#undef DEBUG

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <asm/perf_event.h>
#include <asm/insn.h>
#include <asm/io.h>

#include "perf_event.h"
#include "intel_pt.h"

static DEFINE_PER_CPU(struct pt, pt_ctx);

static struct pt_pmu pt_pmu;

enum cpuid_regs {
	CR_EAX = 0,
	CR_ECX,
	CR_EDX,
	CR_EBX
};

/*
 * Capabilities of Intel PT hardware, such as number of address bits or
 * supported output schemes, are cached and exported to userspace as "caps"
 * attribute group of pt pmu device
 * (/sys/bus/event_source/devices/intel_pt/caps/) so that userspace can store
 * relevant bits together with intel_pt traces.
 *
 * These are necessary for both trace decoding (payloads_lip, contains address
 * width encoded in IP-related packets), and event configuration (bitmasks with
 * permitted values for certain bit fields).
 */
#define PT_CAP(_n, _l, _r, _m)						\
	[PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l,	\
			    .reg = _r, .mask = _m }

static struct pt_cap_desc {
	const char	*name;
	u32		leaf;
	u8		reg;
	u32		mask;
} pt_caps[] = {
	PT_CAP(max_subleaf,		0, CR_EAX, 0xffffffff),
	PT_CAP(cr3_filtering,		0, CR_EBX, BIT(0)),
	PT_CAP(psb_cyc,			0, CR_EBX, BIT(1)),
	PT_CAP(mtc,			0, CR_EBX, BIT(3)),
	PT_CAP(topa_output,		0, CR_ECX, BIT(0)),
	PT_CAP(topa_multiple_entries,	0, CR_ECX, BIT(1)),
	PT_CAP(single_range_output,	0, CR_ECX, BIT(2)),
	PT_CAP(payloads_lip,		0, CR_ECX, BIT(31)),
	PT_CAP(mtc_periods,		1, CR_EAX, 0xffff0000),
	PT_CAP(cycle_thresholds,	1, CR_EBX, 0xffff),
	PT_CAP(psb_periods,		1, CR_EBX, 0xffff0000),
};

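/**
 * pt_cap_get() - read one capability out of the cached CPUID data
 * @cap:	Capability to read.
 *
 * The mask's lowest set bit gives the bit field's shift within its CPUID
 * register, so the returned value is already normalized.
 */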
static u32 pt_cap_get(enum pt_capabilities cap)
{
	struct pt_cap_desc *cd = &pt_caps[cap];
	u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
	unsigned int shift = __ffs(cd->mask);

	return (c & cd->mask) >> shift;
}

static ssize_t pt_cap_show(struct device *cdev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	enum pt_capabilities cap = (long)ea->var;

	return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap));
}

static struct attribute_group pt_cap_group = {
	.name	= "caps",
};

PMU_FORMAT_ATTR(cyc,		"config:1"	);
PMU_FORMAT_ATTR(mtc,		"config:9"	);
PMU_FORMAT_ATTR(tsc,		"config:10"	);
PMU_FORMAT_ATTR(noretcomp,	"config:11"	);
PMU_FORMAT_ATTR(mtc_period,	"config:14-17"	);
PMU_FORMAT_ATTR(cyc_thresh,	"config:19-22"	);
PMU_FORMAT_ATTR(psb_period,	"config:24-27"	);

static struct attribute *pt_formats_attr[] = {
	&format_attr_cyc.attr,
	&format_attr_mtc.attr,
	&format_attr_tsc.attr,
	&format_attr_noretcomp.attr,
	&format_attr_mtc_period.attr,
	&format_attr_cyc_thresh.attr,
	&format_attr_psb_period.attr,
	NULL,
};

static struct attribute_group pt_format_group = {
	.name	= "format",
	.attrs	= pt_formats_attr,
};

static const struct attribute_group *pt_attr_groups[] = {
	&pt_cap_group,
	&pt_format_group,
	NULL,
};

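/**
 * pt_pmu_hw_init() - cache PT CPUID leaves and build the "caps" sysfs group
 *
 * Reads all subleaves of CPUID leaf 20 (0x14, Intel PT enumeration) into
 * pt_pmu.caps and creates one read-only sysfs attribute per pt_caps[]
 * entry, backed by pt_cap_show().
 */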
static int __init pt_pmu_hw_init(void)
{
	struct dev_ext_attribute *de_attrs;
	struct attribute **attrs;
	size_t size;
	int ret;
	long i;

	attrs = NULL;

	for (i = 0; i < PT_CPUID_LEAVES; i++) {
		cpuid_count(20, i,
			    &pt_pmu.caps[CR_EAX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CR_EBX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CR_ECX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CR_EDX + i*PT_CPUID_REGS_NUM]);
	}

	ret = -ENOMEM;
	size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
	attrs = kzalloc(size, GFP_KERNEL);
	if (!attrs)
		goto fail;

	size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
	de_attrs = kzalloc(size, GFP_KERNEL);
	if (!de_attrs)
		goto fail;

	for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
		struct dev_ext_attribute *de_attr = de_attrs + i;

		de_attr->attr.attr.name = pt_caps[i].name;

		sysfs_attr_init(&de_attr->attr.attr);

		de_attr->attr.attr.mode	= S_IRUGO;
		de_attr->attr.show	= pt_cap_show;
		de_attr->var		= (void *)i;

		attrs[i] = &de_attr->attr.attr;
	}

	pt_cap_group.attrs = attrs;

	return 0;

fail:
	kfree(attrs);

	return ret;
}

#define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC	| \
			  RTIT_CTL_CYC_THRESH	| \
			  RTIT_CTL_PSB_FREQ)

#define RTIT_CTL_MTC	(RTIT_CTL_MTC_EN	| \
			 RTIT_CTL_MTC_RANGE)

#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN		| \
			RTIT_CTL_DISRETC	| \
			RTIT_CTL_CYC_PSB	| \
			RTIT_CTL_MTC)

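/**
 * pt_event_valid() - validate an event's config against hardware caps
 * @event:	The event to validate.
 *
 * Reject config bits outside of PT_CONFIG_MASK, as well as PSB, CYC and
 * MTC period/threshold values that CPUID does not advertise as valid;
 * programming unsupported values into RTIT_CTL can #GP (see the MTC
 * comment below).
 */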
static bool pt_event_valid(struct perf_event *event)
{
	u64 config = event->attr.config;
	u64 allowed, requested;

	if ((config & PT_CONFIG_MASK) != config)
		return false;

	if (config & RTIT_CTL_CYC_PSB) {
		if (!pt_cap_get(PT_CAP_psb_cyc))
			return false;

		allowed = pt_cap_get(PT_CAP_psb_periods);
		requested = (config & RTIT_CTL_PSB_FREQ) >>
			RTIT_CTL_PSB_FREQ_OFFSET;
		if (requested && (!(allowed & BIT(requested))))
			return false;

		allowed = pt_cap_get(PT_CAP_cycle_thresholds);
		requested = (config & RTIT_CTL_CYC_THRESH) >>
			RTIT_CTL_CYC_THRESH_OFFSET;
		if (requested && (!(allowed & BIT(requested))))
			return false;
	}

	if (config & RTIT_CTL_MTC) {
		/*
		 * In the unlikely case that CPUID lists valid mtc periods,
		 * but not the mtc capability, drop out here.
		 *
		 * Spec says that setting mtc period bits while mtc bit in
		 * CPUID is 0 will #GP, so better safe than sorry.
		 */
		if (!pt_cap_get(PT_CAP_mtc))
			return false;

		allowed = pt_cap_get(PT_CAP_mtc_periods);
		if (!allowed)
			return false;

		requested = (config & RTIT_CTL_MTC_RANGE) >>
			RTIT_CTL_MTC_RANGE_OFFSET;

		if (!(allowed & BIT(requested)))
			return false;
	}

	return true;
}

/*
 * PT configuration helpers
 * These all are cpu affine and operate on a local PT
 */

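/**
 * pt_config() - program RTIT_CTL and start tracing for an event
 * @event:	The event being scheduled in.
 *
 * Clears RTIT_STATUS once per event lifetime, then writes RTIT_CTL with
 * TraceEn, ToPA and branch tracing enabled, OS/USR bits derived from the
 * event's exclude_* attributes, plus the user-requested config bits.
 */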
static void pt_config(struct perf_event *event)
{
	u64 reg;

	if (!event->hw.itrace_started) {
		event->hw.itrace_started = 1;
		wrmsrl(MSR_IA32_RTIT_STATUS, 0);
	}

	reg = RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN;

	if (!event->attr.exclude_kernel)
		reg |= RTIT_CTL_OS;
	if (!event->attr.exclude_user)
		reg |= RTIT_CTL_USR;

	reg |= (event->attr.config & PT_CONFIG_MASK);

	wrmsrl(MSR_IA32_RTIT_CTL, reg);
}

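/**
 * pt_config_start() - toggle trace generation on or off
 * @start:	If true, set TraceEn in RTIT_CTL; otherwise clear it.
 */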
static void pt_config_start(bool start)
{
	u64 ctl;

	rdmsrl(MSR_IA32_RTIT_CTL, ctl);
	if (start)
		ctl |= RTIT_CTL_TRACEEN;
	else
		ctl &= ~RTIT_CTL_TRACEEN;
	wrmsrl(MSR_IA32_RTIT_CTL, ctl);

	/*
	 * A wrmsr that disables trace generation serializes other PT
	 * registers and causes all data packets to be written to memory,
	 * but a fence is required for the data to become globally visible.
	 *
	 * The below WMB, separating the data store and the aux_head store,
	 * matches the consumer's RMB that separates the aux_head load and
	 * the data load.
	 */
	if (!start)
		wmb();
}

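/**
 * pt_config_buffer() - program output MSRs for a position in the buffer
 * @buf:	Kernel virtual address of the current ToPA table.
 * @topa_idx:	Index of the current output region within that table.
 * @output_off:	Offset within the current output region.
 *
 * The OUTPUT_MASK value built below follows the SDM's layout: bits [6:0]
 * are always set, bits [31:7] hold the table entry offset and bits
 * [63:32] the offset within the current output region.
 */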
static void pt_config_buffer(void *buf, unsigned int topa_idx,
			     unsigned int output_off)
{
	u64 reg;

	wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(buf));

	reg = 0x7f | ((u64)topa_idx << 7) | ((u64)output_off << 32);

	wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
}

/*
 * Keep ToPA table-related metadata on the same page as the actual table,
 * taking up a few words from the top
 */

#define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)

/**
 * struct topa - page-sized ToPA table with metadata at the top
 * @table:	actual ToPA table entries, as understood by PT hardware
 * @list:	linkage to struct pt_buffer's list of tables
 * @phys:	physical address of this page
 * @offset:	offset of the first entry in this table in the buffer
 * @size:	total size of all entries in this table
 * @last:	index of the last initialized entry in this table
 */
struct topa {
	struct topa_entry	table[TENTS_PER_PAGE];
	struct list_head	list;
	u64			phys;
	u64			offset;
	size_t			size;
	int			last;
};

/* make -1 stand for the last table entry */
#define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)])

/**
 * topa_alloc() - allocate page-sized ToPA table
 * @cpu:	CPU on which to allocate.
 * @gfp:	Allocation flags.
 *
 * Return: On success, return the pointer to ToPA table page.
 */
static struct topa *topa_alloc(int cpu, gfp_t gfp)
{
	int node = cpu_to_node(cpu);
	struct topa *topa;
	struct page *p;

	p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
	if (!p)
		return NULL;

	topa = page_address(p);
	topa->last = 0;
	topa->phys = page_to_phys(p);

	/*
	 * In case of single-entry ToPA, always put the self-referencing END
	 * link as the 2nd entry in the table.
	 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
		TOPA_ENTRY(topa, 1)->end = 1;
	}

	return topa;
}

/**
 * topa_free() - free a page-sized ToPA table
 * @topa:	Table to deallocate.
 */
static void topa_free(struct topa *topa)
{
	free_page((unsigned long)topa);
}

/**
 * topa_insert_table() - insert a ToPA table into a buffer
 * @buf:	PT buffer that's being extended.
 * @topa:	New topa table to be inserted.
 *
 * If it's the first table in this buffer, set up buffer's pointers
 * accordingly; otherwise, add an END=1 link entry pointing to @topa to the
 * current "last" table and make @topa the buffer's new last table.
 */
static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
{
	struct topa *last = buf->last;

	list_add_tail(&topa->list, &buf->tables);

	if (!buf->first) {
		buf->first = buf->last = buf->cur = topa;
		return;
	}

	topa->offset = last->offset + last->size;
	buf->last = topa;

	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return;

	BUG_ON(last->last != TENTS_PER_PAGE - 1);

	TOPA_ENTRY(last, -1)->base = topa->phys >> TOPA_SHIFT;
	TOPA_ENTRY(last, -1)->end = 1;
}

/**
 * topa_table_full() - check if a ToPA table is filled up
 * @topa:	ToPA table.
 */
static bool topa_table_full(struct topa *topa)
{
	/* single-entry ToPA is a special case */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return !!topa->last;

	return topa->last == TENTS_PER_PAGE - 1;
}

/**
 * topa_insert_pages() - add the next buffer page to the ToPA tables
 * @buf:	PT buffer being initialized.
 * @gfp:	Allocation flags.
 *
 * Appends an entry for the next page of data_pages (provided by
 * rb_alloc_aux()) to the last ToPA table, allocating and linking a new
 * table first if the current one is full.
 *
 * Return: 0 on success or error code.
 */
static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
{
	struct topa *topa = buf->last;
	int order = 0;
	struct page *p;

	p = virt_to_page(buf->data_pages[buf->nr_pages]);
	if (PagePrivate(p))
		order = page_private(p);

	if (topa_table_full(topa)) {
		topa = topa_alloc(buf->cpu, gfp);
		if (!topa)
			return -ENOMEM;

		topa_insert_table(buf, topa);
	}

	TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
	TOPA_ENTRY(topa, -1)->size = order;
	if (!buf->snapshot && !pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(topa, -1)->intr = 1;
		TOPA_ENTRY(topa, -1)->stop = 1;
	}

	topa->last++;
	topa->size += sizes(order);

	buf->nr_pages += 1ul << order;

	return 0;
}

/**
 * pt_topa_dump() - print ToPA tables and their entries
 * @buf:	PT buffer.
 */
static void pt_topa_dump(struct pt_buffer *buf)
{
	struct topa *topa;

	list_for_each_entry(topa, &buf->tables, list) {
		int i;

		pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table,
			 topa->phys, topa->offset, topa->size);
		for (i = 0; i < TENTS_PER_PAGE; i++) {
			pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
				 &topa->table[i],
				 (unsigned long)topa->table[i].base << TOPA_SHIFT,
				 sizes(topa->table[i].size),
				 topa->table[i].end ? 'E' : ' ',
				 topa->table[i].intr ? 'I' : ' ',
				 topa->table[i].stop ? 'S' : ' ',
				 *(u64 *)&topa->table[i]);
			if ((pt_cap_get(PT_CAP_topa_multiple_entries) &&
			     topa->table[i].stop) ||
			    topa->table[i].end)
				break;
		}
	}
}

/**
 * pt_buffer_advance() - advance to the next output region
 * @buf:	PT buffer.
 *
 * Advance the current pointers in the buffer to the next ToPA entry.
 */
static void pt_buffer_advance(struct pt_buffer *buf)
{
	buf->output_off = 0;
	buf->cur_idx++;

	if (buf->cur_idx == buf->cur->last) {
		if (buf->cur == buf->last)
			buf->cur = buf->first;
		else
			buf->cur = list_entry(buf->cur->list.next, struct topa,
					      list);
		buf->cur_idx = 0;
	}
}

/**
 * pt_update_head() - calculate current offsets and sizes
 * @pt:	Per-cpu pt context.
 *
 * Update buffer's current write pointer position and data size.
 */
static void pt_update_head(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	u64 topa_idx, base, old;

	/* offset of the first region in this table from the beginning of buf */
	base = buf->cur->offset + buf->output_off;

	/* offset of the current output region within this table */
	for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
		base += sizes(buf->cur->table[topa_idx].size);

	if (buf->snapshot) {
		local_set(&buf->data_size, base);
	} else {
		old = (local64_xchg(&buf->head, base) &
		       ((buf->nr_pages << PAGE_SHIFT) - 1));
		if (base < old)
			base += buf->nr_pages << PAGE_SHIFT;

		local_add(base - old, &buf->data_size);
	}
}

/**
 * pt_buffer_region() - obtain current output region's address
 * @buf:	PT buffer.
 */
static void *pt_buffer_region(struct pt_buffer *buf)
{
	return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT);
}

/**
 * pt_buffer_region_size() - obtain current output region's size
 * @buf:	PT buffer.
 */
static size_t pt_buffer_region_size(struct pt_buffer *buf)
{
	return sizes(buf->cur->table[buf->cur_idx].size);
}

/**
 * pt_handle_status() - take care of possible status conditions
 * @pt:	Per-cpu pt context.
 */
static void pt_handle_status(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	int advance = 0;
	u64 status;

	rdmsrl(MSR_IA32_RTIT_STATUS, status);

	if (status & RTIT_STATUS_ERROR) {
		pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
		pt_topa_dump(buf);
		status &= ~RTIT_STATUS_ERROR;
	}

	if (status & RTIT_STATUS_STOPPED) {
		status &= ~RTIT_STATUS_STOPPED;

		/*
		 * On systems that only do single-entry ToPA, hitting STOP
		 * means we are already losing data; need to let the decoder
		 * know.
		 */
		if (!pt_cap_get(PT_CAP_topa_multiple_entries) ||
		    buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
			local_inc(&buf->lost);
			advance++;
		}
	}

	/*
	 * Also on single-entry ToPA implementations, interrupt will come
	 * before the output reaches its output region's boundary.
	 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries) && !buf->snapshot &&
	    pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
		void *head = pt_buffer_region(buf);

		/* everything within this margin needs to be zeroed out */
		memset(head + buf->output_off, 0,
		       pt_buffer_region_size(buf) -
		       buf->output_off);
		advance++;
	}

	if (advance)
		pt_buffer_advance(buf);

	wrmsrl(MSR_IA32_RTIT_STATUS, status);
}

/**
 * pt_read_offset() - translate registers into buffer pointers
 * @buf:	PT buffer.
 *
 * Set buffer's output pointers from MSR values.
 */
static void pt_read_offset(struct pt_buffer *buf)
{
	u64 offset, base_topa;

	rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
	buf->cur = phys_to_virt(base_topa);

	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
	/* offset within current output region */
	buf->output_off = offset >> 32;
	/* index of current output region within this table */
	buf->cur_idx = (offset & 0xffffff80) >> 7;
}

/**
 * pt_topa_next_entry() - obtain index of the first page in the next ToPA entry
 * @buf:	PT buffer.
 * @pg:	Page offset in the buffer.
 *
 * When advancing to the next output region (ToPA entry), given a page offset
 * into the buffer, we need to find the offset of the first page in the next
 * region.
 */
static unsigned int pt_topa_next_entry(struct pt_buffer *buf, unsigned int pg)
{
	struct topa_entry *te = buf->topa_index[pg];

	/* one region */
	if (buf->first == buf->last && buf->first->last == 1)
		return pg;

	do {
		pg++;
		pg &= buf->nr_pages - 1;
	} while (buf->topa_index[pg] == te);

	return pg;
}

/**
 * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
 * @buf:	PT buffer.
 * @handle:	Current output handle.
 *
 * Place INT and STOP marks to prevent overwriting old data that the consumer
 * hasn't yet collected, and to wake up the consumer after a certain fraction
 * of the buffer has filled up. Only needed and sensible for non-snapshot
 * counters.
 *
 * This obviously relies on buf::head to figure out buffer markers, so it has
 * to be called after pt_buffer_reset_offsets() and before the hardware tracing
 * is enabled.
 */
static int pt_buffer_reset_markers(struct pt_buffer *buf,
				   struct perf_output_handle *handle)
{
	unsigned long head = local64_read(&buf->head);
	unsigned long idx, npages, wakeup;

	/* can't stop in the middle of an output region */
	if (buf->output_off + handle->size + 1 <
	    sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size))
		return -EINVAL;

	/* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return 0;

	/* clear STOP and INT from current entry */
	buf->topa_index[buf->stop_pos]->stop = 0;
	buf->topa_index[buf->intr_pos]->intr = 0;

	/* how many pages till the STOP marker */
	npages = handle->size >> PAGE_SHIFT;

	/* if it's on a page boundary, fill up one more page */
	if (!offset_in_page(head + handle->size + 1))
		npages++;

	idx = (head >> PAGE_SHIFT) + npages;
	idx &= buf->nr_pages - 1;
	buf->stop_pos = idx;

	wakeup = handle->wakeup >> PAGE_SHIFT;

	/* in the worst case, wake up the consumer one page before hard stop */
	idx = (head >> PAGE_SHIFT) + npages - 1;
	if (idx > wakeup)
		idx = wakeup;

	idx &= buf->nr_pages - 1;
	buf->intr_pos = idx;

	buf->topa_index[buf->stop_pos]->stop = 1;
	buf->topa_index[buf->intr_pos]->intr = 1;

	return 0;
}

/**
 * pt_buffer_setup_topa_index() - build topa_index[] table of regions
 * @buf:	PT buffer.
 *
 * topa_index[] references output regions indexed by offset into the
 * buffer for purposes of quick reverse lookup.
 */
static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
{
	struct topa *cur = buf->first, *prev = buf->last;
	struct topa_entry *te_cur = TOPA_ENTRY(cur, 0),
		*te_prev = TOPA_ENTRY(prev, prev->last - 1);
	int pg = 0, idx = 0;

	while (pg < buf->nr_pages) {
		int tidx;

		/* pages within one topa entry */
		for (tidx = 0; tidx < 1 << te_cur->size; tidx++, pg++)
			buf->topa_index[pg] = te_prev;

		te_prev = te_cur;

		if (idx == cur->last - 1) {
			/* advance to next topa table */
			idx = 0;
			cur = list_entry(cur->list.next, struct topa, list);
		} else {
			idx++;
		}
		te_cur = TOPA_ENTRY(cur, idx);
	}
}

/**
 * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
 * @buf:	PT buffer.
 * @head:	Write pointer (aux_head) from AUX buffer.
 *
 * Find the ToPA table and entry corresponding to given @head and set buffer's
 * "current" pointers accordingly. This is done after we have obtained the
 * current aux_head position from a successful call to perf_aux_output_begin()
 * to make sure the hardware is writing to the right place.
 *
 * This function modifies buf::{cur,cur_idx,output_off} that will be programmed
 * into PT msrs when the tracing is enabled and buf::head and buf::data_size,
 * which are used to determine INT and STOP markers' locations by a subsequent
 * call to pt_buffer_reset_markers().
 */
static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
{
	int pg;

	if (buf->snapshot)
		head &= (buf->nr_pages << PAGE_SHIFT) - 1;

	pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
	pg = pt_topa_next_entry(buf, pg);

	buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK);
	buf->cur_idx = ((unsigned long)buf->topa_index[pg] -
			(unsigned long)buf->cur) / sizeof(struct topa_entry);
	buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1);

	local64_set(&buf->head, head);
	local_set(&buf->data_size, 0);
}

/**
 * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
 * @buf:	PT buffer.
 */
static void pt_buffer_fini_topa(struct pt_buffer *buf)
{
	struct topa *topa, *iter;

	list_for_each_entry_safe(topa, iter, &buf->tables, list) {
		/*
		 * right now, this is in free_aux() path only, so
		 * no need to unlink this table from the list
		 */
		topa_free(topa);
	}
}

/**
 * pt_buffer_init_topa() - initialize ToPA table for pt buffer
 * @buf:	PT buffer.
 * @nr_pages:	Number of pages in the buffer.
 * @gfp:	Allocation flags.
 */
static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
			       gfp_t gfp)
{
	struct topa *topa;
	int err;

	topa = topa_alloc(buf->cpu, gfp);
	if (!topa)
		return -ENOMEM;

	topa_insert_table(buf, topa);

	while (buf->nr_pages < nr_pages) {
		err = topa_insert_pages(buf, gfp);
		if (err) {
			pt_buffer_fini_topa(buf);
			return -ENOMEM;
		}
	}

	pt_buffer_setup_topa_index(buf);

	/* link last table to the first one, unless we're double buffering */
	if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
		TOPA_ENTRY(buf->last, -1)->end = 1;
	}

	pt_topa_dump(buf);
	return 0;
}

/**
 * pt_buffer_setup_aux() - set up topa tables for a PT buffer
 * @cpu:	Cpu on which to allocate, -1 means current.
 * @pages:	Array of pointers to buffer pages passed from perf core.
 * @nr_pages:	Number of pages in the buffer.
 * @snapshot:	If this is a snapshot/overwrite counter.
 *
 * This is a pmu::setup_aux callback that sets up ToPA tables and all the
 * bookkeeping for an AUX buffer.
 *
 * Return: Our private PT buffer structure.
 */
static void *
pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot)
{
	struct pt_buffer *buf;
	int node, ret;

	if (!nr_pages)
		return NULL;

	if (cpu == -1)
		cpu = raw_smp_processor_id();
	node = cpu_to_node(cpu);

	buf = kzalloc_node(offsetof(struct pt_buffer, topa_index[nr_pages]),
			   GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->cpu = cpu;
	buf->snapshot = snapshot;
	buf->data_pages = pages;

	INIT_LIST_HEAD(&buf->tables);

	ret = pt_buffer_init_topa(buf, nr_pages, GFP_KERNEL);
	if (ret) {
		kfree(buf);
		return NULL;
	}

	return buf;
}

/**
 * pt_buffer_free_aux() - perf AUX deallocation path callback
 * @data:	PT buffer.
 */
static void pt_buffer_free_aux(void *data)
{
	struct pt_buffer *buf = data;

	pt_buffer_fini_topa(buf);
	kfree(buf);
}

/**
 * pt_buffer_is_full() - check if the buffer is full
 * @buf:	PT buffer.
 * @pt:	Per-cpu pt handle.
 *
 * If the user hasn't read data from the output region that aux_head
 * points to, the buffer is considered full: the user needs to read at
 * least this region and update aux_tail to point past it.
 */
static bool pt_buffer_is_full(struct pt_buffer *buf, struct pt *pt)
{
	if (buf->snapshot)
		return false;

	if (local_read(&buf->data_size) >= pt->handle.size)
		return true;

	return false;
}

/**
 * intel_pt_interrupt() - PT PMI handler
 */
void intel_pt_interrupt(void)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf;
	struct perf_event *event = pt->handle.event;

	/*
	 * There may be a dangling PT bit in the interrupt status register
	 * after PT has been disabled by pt_event_stop(). Make sure we don't
	 * do anything (particularly, re-enable) for this event here.
	 */
	if (!ACCESS_ONCE(pt->handle_nmi))
		return;

	pt_config_start(false);

	if (!event)
		return;

	buf = perf_get_aux(&pt->handle);
	if (!buf)
		return;

	pt_read_offset(buf);

	pt_handle_status(pt);

	pt_update_head(pt);

	perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
			    local_xchg(&buf->lost, 0));

	if (!event->hw.state) {
		int ret;

		buf = perf_aux_output_begin(&pt->handle, event);
		if (!buf) {
			event->hw.state = PERF_HES_STOPPED;
			return;
		}

		pt_buffer_reset_offsets(buf, pt->handle.head);
		/* snapshot counters don't use PMI, so it's safe */
		ret = pt_buffer_reset_markers(buf, &pt->handle);
		if (ret) {
			perf_aux_output_end(&pt->handle, 0, true);
			return;
		}

		pt_config_buffer(buf->cur->table, buf->cur_idx,
				 buf->output_off);
		pt_config(event);
	}
}

/*
 * PMU callbacks
 */

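/**
 * pt_event_start() - pmu::start callback
 *
 * Programs the output MSRs for the current buffer position and enables
 * tracing; marks the event PERF_HES_STOPPED instead if there is no usable
 * AUX buffer or it is already full.
 */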
static void pt_event_start(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf = perf_get_aux(&pt->handle);

	if (!buf || pt_buffer_is_full(buf, pt)) {
		event->hw.state = PERF_HES_STOPPED;
		return;
	}

	ACCESS_ONCE(pt->handle_nmi) = 1;
	event->hw.state = 0;

	pt_config_buffer(buf->cur->table, buf->cur_idx,
			 buf->output_off);
	pt_config(event);
}

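/**
 * pt_event_stop() - pmu::stop callback
 *
 * Disables trace generation first, so that a racing PMI sees handle_nmi == 0
 * and backs off; with PERF_EF_UPDATE, also reads the hardware write pointer
 * back from the MSRs and updates the buffer's head and data size.
 */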
static void pt_event_stop(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);

	/*
	 * Protect against the PMI racing with disabling wrmsr,
	 * see comment in intel_pt_interrupt().
	 */
	ACCESS_ONCE(pt->handle_nmi) = 0;
	pt_config_start(false);

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	event->hw.state = PERF_HES_STOPPED;

	if (mode & PERF_EF_UPDATE) {
		struct pt_buffer *buf = perf_get_aux(&pt->handle);

		if (!buf)
			return;

		if (WARN_ON_ONCE(pt->handle.event != event))
			return;

		pt_read_offset(buf);

		pt_handle_status(pt);

		pt_update_head(pt);
	}
}

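/**
 * pt_event_del() - pmu::del callback
 *
 * Stops the event with PERF_EF_UPDATE and closes out the AUX transaction;
 * for snapshot counters, the full buffer size is reported so that the
 * consumer sees the whole ring.
 */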
static void pt_event_del(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf;

	pt_event_stop(event, PERF_EF_UPDATE);

	buf = perf_get_aux(&pt->handle);

	if (buf) {
		if (buf->snapshot)
			pt->handle.head =
				local_xchg(&buf->data_size,
					   buf->nr_pages << PAGE_SHIFT);
		perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
				    local_xchg(&buf->lost, 0));
	}
}

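/**
 * pt_event_add() - pmu::add callback
 *
 * Only one PT event can be scheduled per CPU at a time. Starts an AUX
 * transaction, positions the buffer pointers at aux_head and, unless this
 * is a snapshot counter, plants the STOP/INT markers before optionally
 * starting the event.
 */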
static int pt_event_add(struct perf_event *event, int mode)
{
	struct pt_buffer *buf;
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct hw_perf_event *hwc = &event->hw;
	int ret = -EBUSY;

	if (pt->handle.event)
		goto fail;

	buf = perf_aux_output_begin(&pt->handle, event);
	ret = -EINVAL;
	if (!buf)
		goto fail_stop;

	pt_buffer_reset_offsets(buf, pt->handle.head);
	if (!buf->snapshot) {
		ret = pt_buffer_reset_markers(buf, &pt->handle);
		if (ret)
			goto fail_end_stop;
	}

	if (mode & PERF_EF_START) {
		pt_event_start(event, 0);
		ret = -EBUSY;
		if (hwc->state == PERF_HES_STOPPED)
			goto fail_end_stop;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	return 0;

fail_end_stop:
	perf_aux_output_end(&pt->handle, 0, true);
fail_stop:
	hwc->state = PERF_HES_STOPPED;
fail:
	return ret;
}

static void pt_event_read(struct perf_event *event)
{
}

static void pt_event_destroy(struct perf_event *event)
{
	x86_del_exclusive(x86_lbr_exclusive_pt);
}

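/**
 * pt_event_init() - pmu::event_init callback
 *
 * Validates the event's config against hardware capabilities and takes the
 * x86_lbr_exclusive_pt reservation, since PT and LBR cannot be used at the
 * same time.
 */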
static int pt_event_init(struct perf_event *event)
{
	if (event->attr.type != pt_pmu.pmu.type)
		return -ENOENT;

	if (!pt_event_valid(event))
		return -EINVAL;

	if (x86_add_exclusive(x86_lbr_exclusive_pt))
		return -EBUSY;

	event->destroy = pt_event_destroy;

	return 0;
}

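/**
 * pt_init() - probe for PT and register the PMU
 *
 * Note the ordering: X86_FEATURE_INTEL_PT is tested before any RTIT MSR is
 * touched. Per the commit subject, doing rdmsr() before this CPUID test is
 * what triggered the KVM warning (the MSR doesn't exist on guests without
 * PT), so the feature check must come first. The rdmsrl_safe_on_cpu() loop
 * then bails out if tracing was already enabled on any CPU before boot.
 */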
static __init int pt_init(void)
{
	int ret, cpu, prior_warn = 0;

	BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);

	if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_INTEL_PT))
		return -ENODEV;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		u64 ctl;

		ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
		if (!ret && (ctl & RTIT_CTL_TRACEEN))
			prior_warn++;
	}
	put_online_cpus();

	if (prior_warn) {
		x86_add_exclusive(x86_lbr_exclusive_pt);
		pr_warn("PT is enabled at boot time, doing nothing\n");

		return -EBUSY;
	}

	ret = pt_pmu_hw_init();
	if (ret)
		return ret;

	if (!pt_cap_get(PT_CAP_topa_output)) {
		pr_warn("ToPA output is not supported on this CPU\n");
		return -ENODEV;
	}

	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		pt_pmu.pmu.capabilities =
			PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;

	pt_pmu.pmu.capabilities	|= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
	pt_pmu.pmu.attr_groups	= pt_attr_groups;
	pt_pmu.pmu.task_ctx_nr	= perf_sw_context;
	pt_pmu.pmu.event_init	= pt_event_init;
	pt_pmu.pmu.add		= pt_event_add;
	pt_pmu.pmu.del		= pt_event_del;
	pt_pmu.pmu.start	= pt_event_start;
	pt_pmu.pmu.stop		= pt_event_stop;
	pt_pmu.pmu.read		= pt_event_read;
	pt_pmu.pmu.setup_aux	= pt_buffer_setup_aux;
	pt_pmu.pmu.free_aux	= pt_buffer_free_aux;
	ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);

	return ret;
}
arch_initcall(pt_init);