/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2018 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "common-defs.h"
#include "linux-btrace.h"
#include "common-regcache.h"
#include "gdb_wait.h"
#include "x86-cpuid.h"
#include "filestuff.h"
#include "common/scoped_fd.h"
#include "common/scoped_mmap.h"

#include <inttypes.h>

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
    }

  return cpu;
}

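/* Worked example (illustrative): on an Ivy Bridge cpu, CPUID leaf 1 may
   return EAX = 0x000306a9.  The code above extracts family (EAX >> 8) & 0xf
   = 0x6 and model (EAX >> 4) & 0xf = 0xa, then adds the extended model bits
   (EAX >> 12) & 0xf0 = 0x30, giving model 0x3a as used by intel_supports_bts
   below.  */
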
/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}

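/* Worked example (illustrative): with BUFFER_SIZE = 8, DATA_HEAD = 10 and
   SIZE = 4, perf_event_read computes DATA_TAIL = 6, START = BEGIN + 6 and
   STOP = BEGIN + 2.  Since START >= STOP, the copy wraps: bytes [6, 8) are
   copied first, followed by bytes [0, 2) from the beginning of the
   buffer.  */
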
/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Determine the event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
  gdb_file_up file
    = gdb_fopen_cloexec ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == nullptr)
    return -1;

  int found = fscanf (file.get (), "%d", type);
  if (found == 1)
    return 0;
  return -1;
}

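/* Note: the sysfs file read above contains a single decimal integer, the
   dynamic PMU type number the kernel assigned to the intel_pt event source
   (commonly a small value such as 8, though the exact number varies by
   kernel and machine).  The file is absent when the kernel or cpu does not
   support Intel Processor Trace, in which case we return -1 above.  */
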
/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}

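/* Illustrative /proc/kallsyms line matched by the sscanf above:

     ffffffff81000000 T _text

   The address parses as SCNx64, the "T" matches %*[tT], and "_text" is the
   symbol we look for.  With kptr_restrict set, addresses may read as all
   zeroes; kernel_start then remains 0 and the most-significant-bit fallback
   below is used instead.  */
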
/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */

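/* Worked example (illustrative): reading backwards, suppose the most recent
   sample is s2 = (from: 0x4242, to: 0x4711), preceded by
   s1 = (from: 0x1000, to: 0x4200).  The first block pushed ends at the
   current pc and begins at 0x4711 (s2.to); the next block begins at 0x4200
   (s1.to) and ends at 0x4242 (s2.from).  */
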
static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */

          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}

/* Check whether the kernel supports BTS.  */

static int
kernel_supports_bts (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test bts: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test bts: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test bts: expected stop. status: %d."),
                   status);
          return 0;
        }

      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
        close (file);

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test bts: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether the kernel supports Intel Processor Trace.  */

static int
kernel_supports_pt (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file, type;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test pt: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test pt: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test pt: expected stop. status: %d."),
                   status);
          return 0;
        }

      status = perf_event_pt_event_type (&type);
      if (status != 0)
        file = -1;
      else
        {
          memset (&attr, 0, sizeof (attr));

          attr.size = sizeof (attr);
          attr.type = type;
          attr.exclude_kernel = 1;
          attr.exclude_hv = 1;
          attr.exclude_idle = 1;

          file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
          if (file >= 0)
            close (file);
        }

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test pt: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}

/* Check whether the linux target supports BTS.  */

static int
linux_supports_bts (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_bts ())
        cached = -1;
      else if (!cpu_supports_bts ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* Check whether the linux target supports Intel Processor Trace.  */

static int
linux_supports_pt (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_pt ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  switch (format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return linux_supports_bts ();

    case BTRACE_FORMAT_PT:
      return linux_supports_pt ();
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
}

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    return nullptr;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
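
  /* For example (illustrative): a requested CONF->SIZE of five pages gives
     PAGES = 5 (binary 101).  The loop above adds bit 0 (-> 6), then bit 1
     (-> 8), and stops once PAGES == 1 << 3, i.e. the next power of two.
     The extra page added to LENGTH below holds the perf_event header
     (struct perf_event_mmap_page); only the remaining 2^n pages form the
     actual ring buffer.  */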

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      /* The number of pages we request needs to be a power of two.  */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    return nullptr;

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        return nullptr;
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.mem = (const uint8_t *) data.get () + data_offset;
  bts->bts.last_head = 0ull;
  bts->header = header;
  bts->file = fd.release ();

  data.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct btrace_tinfo_pt *pt;
  size_t pages;
  int pid, pg, errcode, type;

  if (conf->size == 0)
    return NULL;

  errcode = perf_event_pt_event_type (&type);
  if (errcode != 0)
    return NULL;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = type;

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    return nullptr;

  /* Allocate the configuration page.  */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    return nullptr;

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      length = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) length != data_size)
        continue;

      header->aux_size = data_size;

      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
                 header->aux_offset);
      if (aux.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    return nullptr;

  pt->pt.size = aux.size ();
  pt->pt.mem = (const uint8_t *) aux.release ();
  pt->pt.data_head = &header->aux_head;
  pt->header = header;
  pt->file = fd.release ();

  data.release ();

  tinfo->conf.pt.size = (unsigned int) pt->pt.size;
  return tinfo.release ();
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  errno = EOPNOTSUPP;
  return NULL;
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  tinfo = NULL;
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      tinfo = linux_enable_bts (ptid, &conf->bts);
      break;

    case BTRACE_FORMAT_PT:
      tinfo = linux_enable_pt (ptid, &conf->pt);
      break;
    }

  return tinfo;
}

/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}

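/* Worked example for a delta read (illustrative): with BUFFER_SIZE = 4096,
   LAST_HEAD = 5000 and *DATA_HEAD = 6000, the delta is 1000 bytes, which
   fits the buffer.  START points at BEGIN + 6000 % 4096 = BEGIN + 1904 and,
   since DATA_HEAD exceeds BUFFER_SIZE, END = BEGIN + 4096;
   perf_event_read_bts then walks the 1000 bytes backwards from START,
   wrapping at BEGIN if necessary.  */
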
/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  return 0;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */