/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2016 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "common-defs.h"
#include "linux-btrace.h"
#include "common-regcache.h"
#include "gdb_wait.h"
#include "x86-cpuid.h"
#include "filestuff.h"

#include <inttypes.h>

#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
#endif

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

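              /* CPUID leaf 1 reports the family in bits 11:8 of EAX and
                 the model in bits 7:4; for family 0x6, the extended model
                 in bits 19:16 supplies the model's high nibble.  */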
              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
    }

  return cpu;
}

/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

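  /* DATA_HEAD and DATA_TAIL are monotonically increasing byte counts;
     reducing them modulo the buffer size yields the corresponding
     positions in the circular buffer.  */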
  buffer_size = pev->size;
  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}

/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in PSIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;

  size = pev->size;
  if (data_head < size)
    size = (size_t) data_head;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Determine the Intel Processor Trace event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
  FILE *file;
  int found;

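  /* The intel_pt PMU is registered dynamically; the kernel publishes
     the event type to use in perf_event_attr.type via sysfs.  */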
  file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);

  fclose (file);

  if (found == 1)
    return 0;
  return -1;
}

/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;
  FILE *file;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file);
      if (line == NULL)
        break;

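      /* Look for the "_text" symbol marking the kernel's start; the
         "%*[tT]" conversion matches the one-letter text-symbol type
         column without storing it.  */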
      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  fclose (file);

  return kernel_start;
}

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */

static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer; we will wrap around and
             reappear at the very right of the ring buffer.  */

          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}

/* Check whether the kernel supports BTS.  */

static int
kernel_supports_bts (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

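  /* Probe by forking a child that stops itself with SIGTRAP, then
     trying to open a BTS perf event for it.  */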
  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test bts: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test bts: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test bts: expected stop. status: %d."),
                   status);
          return 0;
        }

      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
        close (file);

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test bts: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether the kernel supports Intel Processor Trace.  */

static int
kernel_supports_pt (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file, type;

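  /* Probe exactly as for BTS, except that we open an intel_pt event
     using the dynamic event type read from sysfs.  */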
  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test pt: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test pt: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test pt: expected stop. status: %d."),
                   status);
          return 0;
        }

      status = perf_event_pt_event_type (&type);
      if (status != 0)
        file = -1;
      else
        {
          memset (&attr, 0, sizeof (attr));

          attr.size = sizeof (attr);
          attr.type = type;
          attr.exclude_kernel = 1;
          attr.exclude_hv = 1;
          attr.exclude_idle = 1;

          file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
          if (file >= 0)
            close (file);
        }

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test pt: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}

/* Check whether the linux target supports BTS.  */

static int
linux_supports_bts (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_bts ())
        cached = -1;
      else if (!cpu_supports_bts ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* Check whether the linux target supports Intel Processor Trace.  */

static int
linux_supports_pt (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_pt ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  switch (format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return linux_supports_bts ();

    case BTRACE_FORMAT_PT:
      return linux_supports_pt ();
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
}

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
  if (bts->file < 0)
    goto err_out;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
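  /* E.g. a request for 5 pages (101b) becomes 6 after adding bit 0 and
     8 after adding bit 1; once a single bit remains, the loop stops.  */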

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
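      /* The perf_event_mmap_page header occupies the first mapped page;
         the data buffer follows it, hence the extra PAGE_SIZE.  */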
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      /* The number of pages we request needs to be a power of two.  */
      header = ((struct perf_event_mmap_page *)
                mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0));
      if (header != MAP_FAILED)
        break;
    }

  if (pages == 0)
    goto err_file;

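  /* Older kernels fix the data buffer at the page behind the header page;
     kernels with PERF_ATTR_SIZE_VER5 report the actual offset and size in
     the header, which we read below.  */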
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        {
          munmap ((void *) header, size + PAGE_SIZE);
          goto err_file;
        }
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->header = header;
  bts->bts.mem = ((const uint8_t *) header) + data_offset;
  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.last_head = 0ull;

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo;

 err_file:
  /* We were not able to allocate any buffer.  */
  close (bts->file);

 err_out:
  xfree (tinfo);
  return NULL;
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_pt *pt;
  size_t pages, size;
  int pid, pg, errcode, type;

  if (conf->size == 0)
    return NULL;

  errcode = perf_event_pt_event_type (&type);
  if (errcode != 0)
    return NULL;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = type;

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  pt->file = syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0);
  if (pt->file < 0)
    goto err;

  /* Allocate the configuration page.  */
  header = ((struct perf_event_mmap_page *)
            mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                  pt->file, 0));
  if (header == MAP_FAILED)
    goto err_file;

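  /* The aux area, which receives the actual PT trace, is mapped separately
     from the data buffer; we position it directly behind the data buffer.  */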
  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        continue;

      header->aux_size = data_size;
      length = size;

      pt->pt.mem = ((const uint8_t *)
                    mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
                          header->aux_offset));
      if (pt->pt.mem != MAP_FAILED)
        break;
    }

  if (pages == 0)
    goto err_conf;

  pt->header = header;
  pt->pt.size = size;
  pt->pt.data_head = &header->aux_head;

  tinfo->conf.pt.size = (unsigned int) size;
  return tinfo;

 err_conf:
  munmap ((void *) header, PAGE_SIZE);

 err_file:
  close (pt->file);

 err:
  xfree (tinfo);
  return NULL;
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  errno = EOPNOTSUPP;
  return NULL;
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  tinfo = NULL;
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      tinfo = linux_enable_bts (ptid, &conf->bts);
      break;

    case BTRACE_FORMAT_PT:
      tinfo = linux_enable_pt (ptid, &conf->pt);
      break;
    }

  return tinfo;
}

/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

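      /* If the buffer has not wrapped around yet, the trace occupies
         [BEGIN, START) and we read it backwards from START; otherwise the
         entire buffer holds trace and reading wraps at END.  */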
      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel is still writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  return 0;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */