Fix typo in gdb.python/py-objfile.exp
[deliverable/binutils-gdb.git] / gdb / nat / linux-btrace.c
CommitLineData
7c97f91e
MM
1/* Linux-dependent part of branch trace support for GDB, and GDBserver.
2
32d0add0 3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
7c97f91e
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
53f81362 22#include "common-defs.h"
7c97f91e 23#include "linux-btrace.h"
361c8ade 24#include "common-regcache.h"
be8b1ea6 25#include "gdb_wait.h"
df7e5265 26#include "x86-cpuid.h"
7c97f91e 27
5b4e221c
MF
28#ifdef HAVE_SYS_SYSCALL_H
29#include <sys/syscall.h>
30#endif
31
32#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
7c97f91e 33#include <unistd.h>
7c97f91e
MM
34#include <sys/mman.h>
35#include <sys/user.h>
5826e159 36#include "nat/gdb_ptrace.h"
a950d57c 37#include <sys/types.h>
a950d57c 38#include <signal.h>
d68e53f4 39#include <sys/utsname.h>
7c97f91e
MM
40
/* A single branch record in the BTS perf_event sampling format:
   one source/destination address pair.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A complete perf_event sample record carrying a BTS payload.  */
struct perf_event_sample
{
  /* The generic perf_event sample header.  */
  struct perf_event_header header;

  /* The branch tracing payload itself.  */
  struct perf_event_bts bts;
};
60
afb778a2
MM
61/* Identify the cpu we're running on. */
62static struct btrace_cpu
63btrace_this_cpu (void)
64{
65 struct btrace_cpu cpu;
66 unsigned int eax, ebx, ecx, edx;
67 int ok;
68
69 memset (&cpu, 0, sizeof (cpu));
70
71 ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
72 if (ok != 0)
73 {
74 if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
75 && edx == signature_INTEL_edx)
76 {
77 unsigned int cpuid, ignore;
78
79 ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
80 if (ok != 0)
81 {
82 cpu.vendor = CV_INTEL;
83
84 cpu.family = (cpuid >> 8) & 0xf;
85 cpu.model = (cpuid >> 4) & 0xf;
86
87 if (cpu.family == 0x6)
88 cpu.model += (cpuid >> 12) & 0xf0;
89 }
90 }
91 }
92
93 return cpu;
94}
95
aadf7753 96/* Return non-zero if there is new data in PEVENT; zero otherwise. */
7c97f91e 97
aadf7753
MM
98static int
99perf_event_new_data (const struct perf_event_buffer *pev)
7c97f91e 100{
aadf7753 101 return *pev->data_head != pev->last_head;
7c97f91e
MM
102}
103
d68e53f4
MM
104/* Try to determine the size of a pointer in bits for the OS.
105
106 This is the same as the size of a pointer for the inferior process
107 except when a 32-bit inferior is running on a 64-bit OS. */
108
b20a6524
MM
109/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
110 to the memory holding the copy.
111 The caller is responsible for freeing the memory. */
112
113static gdb_byte *
e7b01ce0
MM
114perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
115 size_t size)
b20a6524
MM
116{
117 const gdb_byte *begin, *end, *start, *stop;
118 gdb_byte *buffer;
e7b01ce0
MM
119 size_t buffer_size;
120 __u64 data_tail;
b20a6524
MM
121
122 if (size == 0)
123 return NULL;
124
125 gdb_assert (size <= data_head);
126 data_tail = data_head - size;
127
128 buffer_size = pev->size;
129 begin = pev->mem;
130 start = begin + data_tail % buffer_size;
131 stop = begin + data_head % buffer_size;
132
133 buffer = xmalloc (size);
134
135 if (start < stop)
136 memcpy (buffer, start, stop - start);
137 else
138 {
139 end = begin + buffer_size;
140
141 memcpy (buffer, start, end - start);
142 memcpy (buffer + (end - start), begin, stop - begin);
143 }
144
145 return buffer;
146}
147
148/* Copy the perf event buffer data from PEV.
149 Store a pointer to the copy into DATA and its size in SIZE. */
150
151static void
152perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
e7b01ce0 153 size_t *psize)
b20a6524 154{
e7b01ce0
MM
155 size_t size;
156 __u64 data_head;
b20a6524
MM
157
158 data_head = *pev->data_head;
159
160 size = pev->size;
161 if (data_head < size)
e7b01ce0 162 size = (size_t) data_head;
b20a6524
MM
163
164 *data = perf_event_read (pev, data_head, size);
165 *psize = size;
166
167 pev->last_head = data_head;
168}
169
/* Determine the perf_event type for Intel(R) Processor Trace from sysfs.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
  FILE *file;
  int found;

  file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);
  fclose (file);

  return (found == 1) ? 0 : -1;
}
191
d68e53f4
MM
/* Try to determine the size of a pointer in bits for the OS kernel.

   This is the same as the size of a pointer for the inferior process
   except when a 32-bit inferior is running on a 64-bit OS.  */

static int
linux_determine_kernel_ptr_bits (void)
{
  struct utsname utsn;

  memset (&utsn, 0, sizeof (utsn));

  if (uname (&utsn) < 0)
    return 0;

  /* We only need to handle the 64-bit host case, here.  For 32-bit host,
     the pointer size can be filled in later based on the inferior.  */
  if (strcmp (utsn.machine, "x86_64") == 0)
    return 64;

  return 0;
}
211
7c97f91e
MM
212/* Check whether an address is in the kernel. */
213
214static inline int
215perf_event_is_kernel_addr (const struct btrace_target_info *tinfo,
216 uint64_t addr)
217{
218 uint64_t mask;
219
220 /* If we don't know the size of a pointer, we can't check. Let's assume it's
221 not a kernel address in this case. */
222 if (tinfo->ptr_bits == 0)
223 return 0;
224
225 /* A bit mask for the most significant bit in an address. */
226 mask = (uint64_t) 1 << (tinfo->ptr_bits - 1);
227
228 /* Check whether the most significant bit in the address is set. */
229 return (addr & mask) != 0;
230}
231
232/* Check whether a perf event record should be skipped. */
233
234static inline int
f4abbc16
MM
235perf_event_skip_bts_record (const struct btrace_target_info *tinfo,
236 const struct perf_event_bts *bts)
7c97f91e
MM
237{
238 /* The hardware may report branches from kernel into user space. Branches
239 from user into kernel space will be suppressed. We filter the former to
240 provide a consistent branch trace excluding kernel. */
241 return perf_event_is_kernel_addr (tinfo, bts->from);
242}
243
244/* Perform a few consistency checks on a perf event sample record. This is
245 meant to catch cases when we get out of sync with the perf event stream. */
246
247static inline int
248perf_event_sample_ok (const struct perf_event_sample *sample)
249{
250 if (sample->header.type != PERF_RECORD_SAMPLE)
251 return 0;
252
253 if (sample->header.size != sizeof (*sample))
254 return 0;
255
256 return 1;
257}
258
259/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
260 and to addresses (plus a header).
261
262 Start points into that buffer at the next sample position.
263 We read the collected samples backwards from start.
264
265 While reading the samples, we convert the information into a list of blocks.
266 For two adjacent samples s1 and s2, we form a block b such that b.begin =
267 s1.to and b.end = s2.from.
268
269 In case the buffer overflows during sampling, one sample may have its lower
270 part at the end and its upper part at the beginning of the buffer. */
271
272static VEC (btrace_block_s) *
273perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
e7b01ce0 274 const uint8_t *end, const uint8_t *start, size_t size)
7c97f91e
MM
275{
276 VEC (btrace_block_s) *btrace = NULL;
277 struct perf_event_sample sample;
e7b01ce0 278 size_t read = 0;
7c97f91e
MM
279 struct btrace_block block = { 0, 0 };
280 struct regcache *regcache;
281
282 gdb_assert (begin <= start);
283 gdb_assert (start <= end);
284
285 /* The first block ends at the current pc. */
361c8ade 286 regcache = get_thread_regcache_for_ptid (tinfo->ptid);
7c97f91e
MM
287 block.end = regcache_read_pc (regcache);
288
289 /* The buffer may contain a partial record as its last entry (i.e. when the
290 buffer size is not a multiple of the sample size). */
291 read = sizeof (sample) - 1;
292
293 for (; read < size; read += sizeof (sample))
294 {
295 const struct perf_event_sample *psample;
296
297 /* Find the next perf_event sample in a backwards traversal. */
298 start -= sizeof (sample);
299
300 /* If we're still inside the buffer, we're done. */
301 if (begin <= start)
302 psample = (const struct perf_event_sample *) start;
303 else
304 {
305 int missing;
306
307 /* We're to the left of the ring buffer, we will wrap around and
308 reappear at the very right of the ring buffer. */
309
310 missing = (begin - start);
311 start = (end - missing);
312
313 /* If the entire sample is missing, we're done. */
314 if (missing == sizeof (sample))
315 psample = (const struct perf_event_sample *) start;
316 else
317 {
318 uint8_t *stack;
319
320 /* The sample wrapped around. The lower part is at the end and
321 the upper part is at the beginning of the buffer. */
322 stack = (uint8_t *) &sample;
323
324 /* Copy the two parts so we have a contiguous sample. */
325 memcpy (stack, start, missing);
326 memcpy (stack + missing, begin, sizeof (sample) - missing);
327
328 psample = &sample;
329 }
330 }
331
332 if (!perf_event_sample_ok (psample))
333 {
334 warning (_("Branch trace may be incomplete."));
335 break;
336 }
337
f4abbc16 338 if (perf_event_skip_bts_record (tinfo, &psample->bts))
7c97f91e
MM
339 continue;
340
341 /* We found a valid sample, so we can complete the current block. */
342 block.begin = psample->bts.to;
343
344 VEC_safe_push (btrace_block_s, btrace, &block);
345
346 /* Start the next block. */
347 block.end = psample->bts.from;
348 }
349
969c39fb
MM
350 /* Push the last block (i.e. the first one of inferior execution), as well.
351 We don't know where it ends, but we know where it starts. If we're
352 reading delta trace, we can fill in the start address later on.
353 Otherwise we will prune it. */
354 block.begin = 0;
355 VEC_safe_push (btrace_block_s, btrace, &block);
356
7c97f91e
MM
357 return btrace;
358}
359
043c3577 360/* Check whether the kernel supports BTS. */
a950d57c
MM
361
362static int
043c3577 363kernel_supports_bts (void)
a950d57c
MM
364{
365 struct perf_event_attr attr;
366 pid_t child, pid;
367 int status, file;
368
369 errno = 0;
370 child = fork ();
371 switch (child)
372 {
373 case -1:
76fb6829 374 warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
a950d57c
MM
375 return 0;
376
377 case 0:
378 status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
379 if (status != 0)
380 {
043c3577 381 warning (_("test bts: cannot PTRACE_TRACEME: %s."),
76fb6829 382 safe_strerror (errno));
a950d57c
MM
383 _exit (1);
384 }
385
386 status = raise (SIGTRAP);
387 if (status != 0)
388 {
043c3577 389 warning (_("test bts: cannot raise SIGTRAP: %s."),
76fb6829 390 safe_strerror (errno));
a950d57c
MM
391 _exit (1);
392 }
393
394 _exit (1);
395
396 default:
397 pid = waitpid (child, &status, 0);
398 if (pid != child)
399 {
043c3577 400 warning (_("test bts: bad pid %ld, error: %s."),
76fb6829 401 (long) pid, safe_strerror (errno));
a950d57c
MM
402 return 0;
403 }
404
405 if (!WIFSTOPPED (status))
406 {
043c3577 407 warning (_("test bts: expected stop. status: %d."),
a950d57c
MM
408 status);
409 return 0;
410 }
411
412 memset (&attr, 0, sizeof (attr));
413
414 attr.type = PERF_TYPE_HARDWARE;
415 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
416 attr.sample_period = 1;
417 attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
418 attr.exclude_kernel = 1;
419 attr.exclude_hv = 1;
420 attr.exclude_idle = 1;
421
422 file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
423 if (file >= 0)
424 close (file);
425
426 kill (child, SIGKILL);
427 ptrace (PTRACE_KILL, child, NULL, NULL);
428
429 pid = waitpid (child, &status, 0);
430 if (pid != child)
431 {
043c3577 432 warning (_("test bts: bad pid %ld, error: %s."),
76fb6829 433 (long) pid, safe_strerror (errno));
a950d57c 434 if (!WIFSIGNALED (status))
043c3577 435 warning (_("test bts: expected killed. status: %d."),
a950d57c
MM
436 status);
437 }
438
439 return (file >= 0);
440 }
441}
442
b20a6524
MM
443/* Check whether the kernel supports Intel(R) Processor Trace. */
444
445static int
446kernel_supports_pt (void)
447{
448 struct perf_event_attr attr;
449 pid_t child, pid;
450 int status, file, type;
451
452 errno = 0;
453 child = fork ();
454 switch (child)
455 {
456 case -1:
76fb6829 457 warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
b20a6524
MM
458 return 0;
459
460 case 0:
461 status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
462 if (status != 0)
463 {
464 warning (_("test pt: cannot PTRACE_TRACEME: %s."),
76fb6829 465 safe_strerror (errno));
b20a6524
MM
466 _exit (1);
467 }
468
469 status = raise (SIGTRAP);
470 if (status != 0)
471 {
472 warning (_("test pt: cannot raise SIGTRAP: %s."),
76fb6829 473 safe_strerror (errno));
b20a6524
MM
474 _exit (1);
475 }
476
477 _exit (1);
478
479 default:
480 pid = waitpid (child, &status, 0);
481 if (pid != child)
482 {
483 warning (_("test pt: bad pid %ld, error: %s."),
76fb6829 484 (long) pid, safe_strerror (errno));
b20a6524
MM
485 return 0;
486 }
487
488 if (!WIFSTOPPED (status))
489 {
490 warning (_("test pt: expected stop. status: %d."),
491 status);
492 return 0;
493 }
494
495 status = perf_event_pt_event_type (&type);
496 if (status != 0)
497 file = -1;
498 else
499 {
500 memset (&attr, 0, sizeof (attr));
501
502 attr.size = sizeof (attr);
503 attr.type = type;
504 attr.exclude_kernel = 1;
505 attr.exclude_hv = 1;
506 attr.exclude_idle = 1;
507
508 file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
509 if (file >= 0)
510 close (file);
511 }
512
513 kill (child, SIGKILL);
514 ptrace (PTRACE_KILL, child, NULL, NULL);
515
516 pid = waitpid (child, &status, 0);
517 if (pid != child)
518 {
519 warning (_("test pt: bad pid %ld, error: %s."),
76fb6829 520 (long) pid, safe_strerror (errno));
b20a6524
MM
521 if (!WIFSIGNALED (status))
522 warning (_("test pt: expected killed. status: %d."),
523 status);
524 }
525
526 return (file >= 0);
527 }
528}
529
043c3577 530/* Check whether an Intel cpu supports BTS. */
a950d57c
MM
531
532static int
afb778a2 533intel_supports_bts (const struct btrace_cpu *cpu)
a950d57c 534{
afb778a2 535 switch (cpu->family)
5f8e0b8f
MF
536 {
537 case 0x6:
afb778a2 538 switch (cpu->model)
5f8e0b8f
MF
539 {
540 case 0x1a: /* Nehalem */
541 case 0x1f:
542 case 0x1e:
543 case 0x2e:
544 case 0x25: /* Westmere */
545 case 0x2c:
546 case 0x2f:
547 case 0x2a: /* Sandy Bridge */
548 case 0x2d:
549 case 0x3a: /* Ivy Bridge */
550
551 /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
552 "from" information afer an EIST transition, T-states, C1E, or
553 Adaptive Thermal Throttling. */
554 return 0;
555 }
556 }
a950d57c
MM
557
558 return 1;
a950d57c
MM
559}
560
043c3577 561/* Check whether the cpu supports BTS. */
a950d57c
MM
562
563static int
043c3577 564cpu_supports_bts (void)
a950d57c 565{
afb778a2 566 struct btrace_cpu cpu;
a950d57c 567
afb778a2
MM
568 cpu = btrace_this_cpu ();
569 switch (cpu.vendor)
570 {
571 default:
572 /* Don't know about others. Let's assume they do. */
573 return 1;
a950d57c 574
afb778a2
MM
575 case CV_INTEL:
576 return intel_supports_bts (&cpu);
577 }
a950d57c
MM
578}
579
/* Check whether the linux target supports BTS.  */

static int
linux_supports_bts (void)
{
  /* Probe once and cache the verdict: 1 for supported, -1 for not.  */
  static int cached;

  if (cached == 0)
    cached = (kernel_supports_bts () && cpu_supports_bts ()) ? 1 : -1;

  return cached > 0;
}
599
b20a6524
MM
/* Check whether the linux target supports Intel(R) Processor Trace.  */

static int
linux_supports_pt (void)
{
  /* Probe once and cache the verdict: 1 for supported, -1 for not.  */
  static int cached;

  if (cached == 0)
    cached = kernel_supports_pt () ? 1 : -1;

  return cached > 0;
}
617
7c97f91e
MM
618/* See linux-btrace.h. */
619
043c3577
MM
620int
621linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
622{
623 switch (format)
624 {
625 case BTRACE_FORMAT_NONE:
626 return 0;
627
628 case BTRACE_FORMAT_BTS:
629 return linux_supports_bts ();
b20a6524
MM
630
631 case BTRACE_FORMAT_PT:
632 return linux_supports_pt ();
043c3577
MM
633 }
634
635 internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
636}
637
f4abbc16 638/* Enable branch tracing in BTS format. */
043c3577 639
f4abbc16 640static struct btrace_target_info *
d33501a5 641linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
7c97f91e 642{
aadf7753 643 struct perf_event_mmap_page *header;
7c97f91e 644 struct btrace_target_info *tinfo;
f4abbc16 645 struct btrace_tinfo_bts *bts;
e7b01ce0
MM
646 size_t size, pages;
647 __u64 data_offset;
d0fa7535 648 int pid, pg;
7c97f91e
MM
649
650 tinfo = xzalloc (sizeof (*tinfo));
651 tinfo->ptid = ptid;
d68e53f4 652 tinfo->ptr_bits = linux_determine_kernel_ptr_bits ();
7c97f91e 653
f4abbc16
MM
654 tinfo->conf.format = BTRACE_FORMAT_BTS;
655 bts = &tinfo->variant.bts;
7c97f91e 656
f4abbc16
MM
657 bts->attr.size = sizeof (bts->attr);
658 bts->attr.type = PERF_TYPE_HARDWARE;
659 bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
660 bts->attr.sample_period = 1;
7c97f91e 661
f4abbc16
MM
662 /* We sample from and to address. */
663 bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
7c97f91e 664
f4abbc16
MM
665 bts->attr.exclude_kernel = 1;
666 bts->attr.exclude_hv = 1;
667 bts->attr.exclude_idle = 1;
7c97f91e
MM
668
669 pid = ptid_get_lwp (ptid);
670 if (pid == 0)
671 pid = ptid_get_pid (ptid);
672
673 errno = 0;
f4abbc16
MM
674 bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
675 if (bts->file < 0)
b20a6524 676 goto err_out;
7c97f91e 677
d33501a5 678 /* Convert the requested size in bytes to pages (rounding up). */
e7b01ce0
MM
679 pages = ((size_t) conf->size / PAGE_SIZE
680 + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
d33501a5
MM
681 /* We need at least one page. */
682 if (pages == 0)
683 pages = 1;
684
685 /* The buffer size can be requested in powers of two pages. Adjust PAGES
686 to the next power of two. */
e7b01ce0
MM
687 for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
688 if ((pages & ((size_t) 1 << pg)) != 0)
689 pages += ((size_t) 1 << pg);
d33501a5
MM
690
691 /* We try to allocate the requested size.
692 If that fails, try to get as much as we can. */
693 for (; pages > 0; pages >>= 1)
d0fa7535 694 {
d33501a5 695 size_t length;
e7b01ce0 696 __u64 data_size;
d33501a5 697
e7b01ce0
MM
698 data_size = (__u64) pages * PAGE_SIZE;
699
700 /* Don't ask for more than we can represent in the configuration. */
701 if ((__u64) UINT_MAX < data_size)
702 continue;
703
704 size = (size_t) data_size;
d33501a5
MM
705 length = size + PAGE_SIZE;
706
707 /* Check for overflows. */
e7b01ce0 708 if ((__u64) length != data_size + PAGE_SIZE)
d33501a5
MM
709 continue;
710
d0fa7535 711 /* The number of pages we request needs to be a power of two. */
d33501a5 712 header = mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0);
aadf7753
MM
713 if (header != MAP_FAILED)
714 break;
d0fa7535 715 }
7c97f91e 716
010a18a1 717 if (pages == 0)
aadf7753
MM
718 goto err_file;
719
010a18a1 720 data_offset = PAGE_SIZE;
010a18a1
MM
721
722#if defined (PERF_ATTR_SIZE_VER5)
723 if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
724 {
e7b01ce0
MM
725 __u64 data_size;
726
010a18a1
MM
727 data_offset = header->data_offset;
728 data_size = header->data_size;
e7b01ce0
MM
729
730 size = (unsigned int) data_size;
731
732 /* Check for overflows. */
733 if ((__u64) size != data_size)
734 {
735 munmap ((void *) header, size + PAGE_SIZE);
736 goto err_file;
737 }
010a18a1
MM
738 }
739#endif /* defined (PERF_ATTR_SIZE_VER5) */
740
f4abbc16 741 bts->header = header;
010a18a1 742 bts->bts.mem = ((const uint8_t *) header) + data_offset;
e7b01ce0 743 bts->bts.size = size;
f4abbc16 744 bts->bts.data_head = &header->data_head;
e7b01ce0 745 bts->bts.last_head = 0ull;
aadf7753 746
e7b01ce0 747 tinfo->conf.bts.size = (unsigned int) size;
aadf7753
MM
748 return tinfo;
749
750 err_file:
d0fa7535 751 /* We were not able to allocate any buffer. */
f4abbc16 752 close (bts->file);
7c97f91e 753
b20a6524
MM
754 err_out:
755 xfree (tinfo);
756 return NULL;
757}
758
759#if defined (PERF_ATTR_SIZE_VER5)
760
761/* Enable branch tracing in Intel(R) Processor Trace format. */
762
763static struct btrace_target_info *
764linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
765{
766 struct perf_event_mmap_page *header;
767 struct btrace_target_info *tinfo;
768 struct btrace_tinfo_pt *pt;
e7b01ce0 769 size_t pages, size;
b20a6524
MM
770 int pid, pg, errcode, type;
771
772 if (conf->size == 0)
773 return NULL;
774
775 errcode = perf_event_pt_event_type (&type);
776 if (errcode != 0)
777 return NULL;
778
779 pid = ptid_get_lwp (ptid);
780 if (pid == 0)
781 pid = ptid_get_pid (ptid);
782
783 tinfo = xzalloc (sizeof (*tinfo));
784 tinfo->ptid = ptid;
785 tinfo->ptr_bits = 0;
786
787 tinfo->conf.format = BTRACE_FORMAT_PT;
788 pt = &tinfo->variant.pt;
789
790 pt->attr.size = sizeof (pt->attr);
791 pt->attr.type = type;
792
793 pt->attr.exclude_kernel = 1;
794 pt->attr.exclude_hv = 1;
795 pt->attr.exclude_idle = 1;
796
797 errno = 0;
798 pt->file = syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0);
799 if (pt->file < 0)
800 goto err;
801
802 /* Allocate the configuration page. */
803 header = mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
804 pt->file, 0);
805 if (header == MAP_FAILED)
806 goto err_file;
807
808 header->aux_offset = header->data_offset + header->data_size;
809
810 /* Convert the requested size in bytes to pages (rounding up). */
e7b01ce0
MM
811 pages = ((size_t) conf->size / PAGE_SIZE
812 + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
b20a6524
MM
813 /* We need at least one page. */
814 if (pages == 0)
815 pages = 1;
816
817 /* The buffer size can be requested in powers of two pages. Adjust PAGES
818 to the next power of two. */
e7b01ce0
MM
819 for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
820 if ((pages & ((size_t) 1 << pg)) != 0)
821 pages += ((size_t) 1 << pg);
b20a6524
MM
822
823 /* We try to allocate the requested size.
824 If that fails, try to get as much as we can. */
825 for (; pages > 0; pages >>= 1)
826 {
827 size_t length;
e7b01ce0 828 __u64 data_size;
b20a6524 829
e7b01ce0
MM
830 data_size = (__u64) pages * PAGE_SIZE;
831
832 /* Don't ask for more than we can represent in the configuration. */
833 if ((__u64) UINT_MAX < data_size)
834 continue;
835
836 size = (size_t) data_size;
b20a6524
MM
837
838 /* Check for overflows. */
e7b01ce0 839 if ((__u64) size != data_size)
b20a6524
MM
840 continue;
841
e7b01ce0
MM
842 header->aux_size = data_size;
843 length = size;
b20a6524
MM
844
845 pt->pt.mem = mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
846 header->aux_offset);
847 if (pt->pt.mem != MAP_FAILED)
848 break;
849 }
850
851 if (pages == 0)
852 goto err_conf;
853
854 pt->header = header;
855 pt->pt.size = size;
856 pt->pt.data_head = &header->aux_head;
857
e7b01ce0 858 tinfo->conf.pt.size = (unsigned int) size;
b20a6524
MM
859 return tinfo;
860
861 err_conf:
862 munmap((void *) header, PAGE_SIZE);
863
864 err_file:
865 close (pt->file);
866
7c97f91e
MM
867 err:
868 xfree (tinfo);
869 return NULL;
870}
871
b20a6524
MM
872#else /* !defined (PERF_ATTR_SIZE_VER5) */
873
874static struct btrace_target_info *
875linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
876{
877 errno = EOPNOTSUPP;
878 return NULL;
879}
880
881#endif /* !defined (PERF_ATTR_SIZE_VER5) */
882
7c97f91e
MM
883/* See linux-btrace.h. */
884
f4abbc16
MM
885struct btrace_target_info *
886linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
887{
888 struct btrace_target_info *tinfo;
889
890 tinfo = NULL;
891 switch (conf->format)
892 {
893 case BTRACE_FORMAT_NONE:
894 break;
895
896 case BTRACE_FORMAT_BTS:
d33501a5 897 tinfo = linux_enable_bts (ptid, &conf->bts);
f4abbc16 898 break;
b20a6524
MM
899
900 case BTRACE_FORMAT_PT:
901 tinfo = linux_enable_pt (ptid, &conf->pt);
902 break;
f4abbc16
MM
903 }
904
905 return tinfo;
906}
907
908/* Disable BTS tracing. */
909
910static enum btrace_error
911linux_disable_bts (struct btrace_tinfo_bts *tinfo)
7c97f91e 912{
aadf7753 913 munmap((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
7c97f91e 914 close (tinfo->file);
7c97f91e 915
969c39fb 916 return BTRACE_ERR_NONE;
7c97f91e
MM
917}
918
b20a6524
MM
919/* Disable Intel(R) Processor Trace tracing. */
920
921static enum btrace_error
922linux_disable_pt (struct btrace_tinfo_pt *tinfo)
923{
924 munmap((void *) tinfo->pt.mem, tinfo->pt.size);
925 munmap((void *) tinfo->header, PAGE_SIZE);
926 close (tinfo->file);
927
928 return BTRACE_ERR_NONE;
929}
930
f4abbc16
MM
931/* See linux-btrace.h. */
932
933enum btrace_error
934linux_disable_btrace (struct btrace_target_info *tinfo)
935{
936 enum btrace_error errcode;
937
938 errcode = BTRACE_ERR_NOT_SUPPORTED;
939 switch (tinfo->conf.format)
940 {
941 case BTRACE_FORMAT_NONE:
942 break;
943
944 case BTRACE_FORMAT_BTS:
945 errcode = linux_disable_bts (&tinfo->variant.bts);
946 break;
b20a6524
MM
947
948 case BTRACE_FORMAT_PT:
949 errcode = linux_disable_pt (&tinfo->variant.pt);
950 break;
f4abbc16
MM
951 }
952
953 if (errcode == BTRACE_ERR_NONE)
954 xfree (tinfo);
955
956 return errcode;
957}
958
734b0e4b
MM
959/* Read branch trace data in BTS format for the thread given by TINFO into
960 BTRACE using the TYPE reading method. */
7c97f91e 961
734b0e4b
MM
962static enum btrace_error
963linux_read_bts (struct btrace_data_bts *btrace,
964 struct btrace_target_info *tinfo,
965 enum btrace_read_type type)
7c97f91e 966{
aadf7753 967 struct perf_event_buffer *pevent;
7c97f91e 968 const uint8_t *begin, *end, *start;
e7b01ce0
MM
969 size_t buffer_size, size;
970 __u64 data_head, data_tail;
aadf7753
MM
971 unsigned int retries = 5;
972
f4abbc16 973 pevent = &tinfo->variant.bts.bts;
7c97f91e 974
969c39fb
MM
975 /* For delta reads, we return at least the partial last block containing
976 the current PC. */
aadf7753 977 if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
969c39fb 978 return BTRACE_ERR_NONE;
7c97f91e 979
aadf7753
MM
980 buffer_size = pevent->size;
981 data_tail = pevent->last_head;
7c97f91e
MM
982
983 /* We may need to retry reading the trace. See below. */
984 while (retries--)
985 {
aadf7753 986 data_head = *pevent->data_head;
7c97f91e 987
ed9edfb5 988 /* Delete any leftover trace from the previous iteration. */
734b0e4b 989 VEC_free (btrace_block_s, btrace->blocks);
ed9edfb5 990
969c39fb 991 if (type == BTRACE_READ_DELTA)
7c97f91e 992 {
e7b01ce0
MM
993 __u64 data_size;
994
969c39fb
MM
995 /* Determine the number of bytes to read and check for buffer
996 overflows. */
997
998 /* Check for data head overflows. We might be able to recover from
999 those but they are very unlikely and it's not really worth the
1000 effort, I think. */
1001 if (data_head < data_tail)
1002 return BTRACE_ERR_OVERFLOW;
1003
1004 /* If the buffer is smaller than the trace delta, we overflowed. */
e7b01ce0
MM
1005 data_size = data_head - data_tail;
1006 if (buffer_size < data_size)
969c39fb 1007 return BTRACE_ERR_OVERFLOW;
e7b01ce0
MM
1008
1009 /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t. */
1010 size = (size_t) data_size;
969c39fb
MM
1011 }
1012 else
1013 {
1014 /* Read the entire buffer. */
1015 size = buffer_size;
7c97f91e 1016
969c39fb
MM
1017 /* Adjust the size if the buffer has not overflowed, yet. */
1018 if (data_head < size)
e7b01ce0 1019 size = (size_t) data_head;
7c97f91e
MM
1020 }
1021
969c39fb 1022 /* Data_head keeps growing; the buffer itself is circular. */
aadf7753 1023 begin = pevent->mem;
969c39fb
MM
1024 start = begin + data_head % buffer_size;
1025
1026 if (data_head <= buffer_size)
1027 end = start;
1028 else
aadf7753 1029 end = begin + pevent->size;
969c39fb 1030
734b0e4b 1031 btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);
969c39fb 1032
7c97f91e
MM
1033 /* The stopping thread notifies its ptracer before it is scheduled out.
1034 On multi-core systems, the debugger might therefore run while the
1035 kernel might be writing the last branch trace records.
1036
1037 Let's check whether the data head moved while we read the trace. */
aadf7753 1038 if (data_head == *pevent->data_head)
7c97f91e
MM
1039 break;
1040 }
1041
aadf7753 1042 pevent->last_head = data_head;
7c97f91e 1043
969c39fb
MM
1044 /* Prune the incomplete last block (i.e. the first one of inferior execution)
1045 if we're not doing a delta read. There is no way of filling in its zeroed
1046 BEGIN element. */
734b0e4b
MM
1047 if (!VEC_empty (btrace_block_s, btrace->blocks)
1048 && type != BTRACE_READ_DELTA)
1049 VEC_pop (btrace_block_s, btrace->blocks);
969c39fb
MM
1050
1051 return BTRACE_ERR_NONE;
7c97f91e
MM
1052}
1053
b20a6524
MM
1054/* Fill in the Intel(R) Processor Trace configuration information. */
1055
1056static void
1057linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
1058{
1059 conf->cpu = btrace_this_cpu ();
1060}
1061
1062/* Read branch trace data in Intel(R) Processor Trace format for the thread
1063 given by TINFO into BTRACE using the TYPE reading method. */
1064
1065static enum btrace_error
1066linux_read_pt (struct btrace_data_pt *btrace,
1067 struct btrace_target_info *tinfo,
1068 enum btrace_read_type type)
1069{
1070 struct perf_event_buffer *pt;
1071
1072 pt = &tinfo->variant.pt.pt;
1073
1074 linux_fill_btrace_pt_config (&btrace->config);
1075
1076 switch (type)
1077 {
1078 case BTRACE_READ_DELTA:
1079 /* We don't support delta reads. The data head (i.e. aux_head) wraps
1080 around to stay inside the aux buffer. */
1081 return BTRACE_ERR_NOT_SUPPORTED;
1082
1083 case BTRACE_READ_NEW:
1084 if (!perf_event_new_data (pt))
1085 return BTRACE_ERR_NONE;
1086
1087 /* Fall through. */
1088 case BTRACE_READ_ALL:
1089 perf_event_read_all (pt, &btrace->data, &btrace->size);
1090 return BTRACE_ERR_NONE;
1091 }
1092
1093 internal_error (__FILE__, __LINE__, _("Unkown btrace read type."));
1094}
1095
734b0e4b
MM
1096/* See linux-btrace.h. */
1097
1098enum btrace_error
1099linux_read_btrace (struct btrace_data *btrace,
1100 struct btrace_target_info *tinfo,
1101 enum btrace_read_type type)
1102{
f4abbc16
MM
1103 switch (tinfo->conf.format)
1104 {
1105 case BTRACE_FORMAT_NONE:
1106 return BTRACE_ERR_NOT_SUPPORTED;
1107
1108 case BTRACE_FORMAT_BTS:
1109 /* We read btrace in BTS format. */
1110 btrace->format = BTRACE_FORMAT_BTS;
1111 btrace->variant.bts.blocks = NULL;
1112
1113 return linux_read_bts (&btrace->variant.bts, tinfo, type);
b20a6524
MM
1114
1115 case BTRACE_FORMAT_PT:
1116 /* We read btrace in Intel(R) Processor Trace format. */
1117 btrace->format = BTRACE_FORMAT_PT;
1118 btrace->variant.pt.data = NULL;
1119 btrace->variant.pt.size = 0;
1120
1121 return linux_read_pt (&btrace->variant.pt, tinfo, type);
f4abbc16
MM
1122 }
1123
1124 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1125}
1126
1127/* See linux-btrace.h. */
734b0e4b 1128
f4abbc16
MM
1129const struct btrace_config *
1130linux_btrace_conf (const struct btrace_target_info *tinfo)
1131{
1132 return &tinfo->conf;
734b0e4b
MM
1133}
1134
7c97f91e
MM
1135#else /* !HAVE_LINUX_PERF_EVENT_H */
1136
1137/* See linux-btrace.h. */
1138
1139int
043c3577 1140linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
7c97f91e
MM
1141{
1142 return 0;
1143}
1144
1145/* See linux-btrace.h. */
1146
1147struct btrace_target_info *
f4abbc16 1148linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
7c97f91e
MM
1149{
1150 return NULL;
1151}
1152
1153/* See linux-btrace.h. */
1154
969c39fb 1155enum btrace_error
7c97f91e
MM
1156linux_disable_btrace (struct btrace_target_info *tinfo)
1157{
969c39fb 1158 return BTRACE_ERR_NOT_SUPPORTED;
7c97f91e
MM
1159}
1160
1161/* See linux-btrace.h. */
1162
969c39fb 1163enum btrace_error
734b0e4b 1164linux_read_btrace (struct btrace_data *btrace,
969c39fb 1165 struct btrace_target_info *tinfo,
7c97f91e
MM
1166 enum btrace_read_type type)
1167{
969c39fb 1168 return BTRACE_ERR_NOT_SUPPORTED;
7c97f91e
MM
1169}
1170
f4abbc16
MM
1171/* See linux-btrace.h. */
1172
1173const struct btrace_config *
1174linux_btrace_conf (const struct btrace_target_info *tinfo)
1175{
1176 return NULL;
1177}
1178
7c97f91e 1179#endif /* !HAVE_LINUX_PERF_EVENT_H */
This page took 0.347798 seconds and 4 git commands to generate.