PowerPC64 stubs don't match calculated size
[deliverable/binutils-gdb.git] / gdb / nat / linux-btrace.c
CommitLineData
7c97f91e
MM
1/* Linux-dependent part of branch trace support for GDB, and GDBserver.
2
61baf725 3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
7c97f91e
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
53f81362 22#include "common-defs.h"
7c97f91e 23#include "linux-btrace.h"
361c8ade 24#include "common-regcache.h"
be8b1ea6 25#include "gdb_wait.h"
df7e5265 26#include "x86-cpuid.h"
0568462b
MM
27#include "filestuff.h"
28
29#include <inttypes.h>
7c97f91e 30
5b4e221c
MF
31#ifdef HAVE_SYS_SYSCALL_H
32#include <sys/syscall.h>
33#endif
34
35#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
7c97f91e 36#include <unistd.h>
7c97f91e
MM
37#include <sys/mman.h>
38#include <sys/user.h>
5826e159 39#include "nat/gdb_ptrace.h"
a950d57c 40#include <sys/types.h>
a950d57c 41#include <signal.h>
7c97f91e
MM
42
/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  This matches the record layout
   produced for PERF_SAMPLE_IP | PERF_SAMPLE_ADDR, which is the
   sample_type requested in linux_enable_bts, below.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};
62
afb778a2
MM
63/* Identify the cpu we're running on. */
64static struct btrace_cpu
65btrace_this_cpu (void)
66{
67 struct btrace_cpu cpu;
68 unsigned int eax, ebx, ecx, edx;
69 int ok;
70
71 memset (&cpu, 0, sizeof (cpu));
72
73 ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
74 if (ok != 0)
75 {
76 if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
77 && edx == signature_INTEL_edx)
78 {
79 unsigned int cpuid, ignore;
80
81 ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
82 if (ok != 0)
83 {
84 cpu.vendor = CV_INTEL;
85
86 cpu.family = (cpuid >> 8) & 0xf;
87 cpu.model = (cpuid >> 4) & 0xf;
88
89 if (cpu.family == 0x6)
90 cpu.model += (cpuid >> 12) & 0xf0;
91 }
92 }
93 }
94
95 return cpu;
96}
97
aadf7753 98/* Return non-zero if there is new data in PEVENT; zero otherwise. */
7c97f91e 99
aadf7753
MM
100static int
101perf_event_new_data (const struct perf_event_buffer *pev)
7c97f91e 102{
aadf7753 103 return *pev->data_head != pev->last_head;
7c97f91e
MM
104}
105
b20a6524
MM
106/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
107 to the memory holding the copy.
108 The caller is responsible for freeing the memory. */
109
110static gdb_byte *
e7b01ce0
MM
111perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
112 size_t size)
b20a6524
MM
113{
114 const gdb_byte *begin, *end, *start, *stop;
115 gdb_byte *buffer;
e7b01ce0
MM
116 size_t buffer_size;
117 __u64 data_tail;
b20a6524
MM
118
119 if (size == 0)
120 return NULL;
121
db58b373
MM
122 /* We should never ask for more data than the buffer can hold. */
123 buffer_size = pev->size;
124 gdb_assert (size <= buffer_size);
125
126 /* If we ask for more data than we seem to have, we wrap around and read
127 data from the end of the buffer. This is already handled by the %
128 BUFFER_SIZE operation, below. Here, we just need to make sure that we
129 don't underflow.
130
131 Note that this is perfectly OK for perf event buffers where data_head
132 doesn'grow indefinitely and instead wraps around to remain within the
133 buffer's boundaries. */
134 if (data_head < size)
135 data_head += buffer_size;
136
b20a6524
MM
137 gdb_assert (size <= data_head);
138 data_tail = data_head - size;
139
b20a6524
MM
140 begin = pev->mem;
141 start = begin + data_tail % buffer_size;
142 stop = begin + data_head % buffer_size;
143
224c3ddb 144 buffer = (gdb_byte *) xmalloc (size);
b20a6524
MM
145
146 if (start < stop)
147 memcpy (buffer, start, stop - start);
148 else
149 {
150 end = begin + buffer_size;
151
152 memcpy (buffer, start, end - start);
153 memcpy (buffer + (end - start), begin, stop - begin);
154 }
155
156 return buffer;
157}
158
159/* Copy the perf event buffer data from PEV.
160 Store a pointer to the copy into DATA and its size in SIZE. */
161
162static void
163perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
e7b01ce0 164 size_t *psize)
b20a6524 165{
e7b01ce0
MM
166 size_t size;
167 __u64 data_head;
b20a6524
MM
168
169 data_head = *pev->data_head;
b20a6524 170 size = pev->size;
b20a6524
MM
171
172 *data = perf_event_read (pev, data_head, size);
173 *psize = size;
174
175 pev->last_head = data_head;
176}
177
178/* Determine the event type.
179 Returns zero on success and fills in TYPE; returns -1 otherwise. */
180
181static int
182perf_event_pt_event_type (int *type)
183{
184 FILE *file;
185 int found;
186
187 file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
188 if (file == NULL)
189 return -1;
190
191 found = fscanf (file, "%d", type);
192
193 fclose (file);
194
195 if (found == 1)
196 return 0;
197 return -1;
198}
199
0568462b
MM
200/* Try to determine the start address of the Linux kernel. */
201
202static uint64_t
203linux_determine_kernel_start (void)
d68e53f4 204{
0568462b
MM
205 static uint64_t kernel_start;
206 static int cached;
d68e53f4 207
0568462b
MM
208 if (cached != 0)
209 return kernel_start;
d68e53f4 210
0568462b
MM
211 cached = 1;
212
d419f42d 213 gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
0568462b
MM
214 if (file == NULL)
215 return kernel_start;
216
d419f42d 217 while (!feof (file.get ()))
0568462b
MM
218 {
219 char buffer[1024], symbol[8], *line;
220 uint64_t addr;
221 int match;
222
d419f42d 223 line = fgets (buffer, sizeof (buffer), file.get ());
0568462b
MM
224 if (line == NULL)
225 break;
d68e53f4 226
0568462b
MM
227 match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
228 if (match != 2)
229 continue;
d68e53f4 230
0568462b
MM
231 if (strcmp (symbol, "_text") == 0)
232 {
233 kernel_start = addr;
234 break;
235 }
236 }
237
0568462b 238 return kernel_start;
d68e53f4
MM
239}
240
7c97f91e
MM
241/* Check whether an address is in the kernel. */
242
243static inline int
0568462b 244perf_event_is_kernel_addr (uint64_t addr)
7c97f91e 245{
0568462b 246 uint64_t kernel_start;
7c97f91e 247
0568462b
MM
248 kernel_start = linux_determine_kernel_start ();
249 if (kernel_start != 0ull)
250 return (addr >= kernel_start);
7c97f91e 251
0568462b
MM
252 /* If we don't know the kernel's start address, let's check the most
253 significant bit. This will work at least for 64-bit kernels. */
254 return ((addr & (1ull << 63)) != 0);
7c97f91e
MM
255}
256
257/* Check whether a perf event record should be skipped. */
258
259static inline int
0568462b 260perf_event_skip_bts_record (const struct perf_event_bts *bts)
7c97f91e
MM
261{
262 /* The hardware may report branches from kernel into user space. Branches
263 from user into kernel space will be suppressed. We filter the former to
264 provide a consistent branch trace excluding kernel. */
0568462b 265 return perf_event_is_kernel_addr (bts->from);
7c97f91e
MM
266}
267
268/* Perform a few consistency checks on a perf event sample record. This is
269 meant to catch cases when we get out of sync with the perf event stream. */
270
271static inline int
272perf_event_sample_ok (const struct perf_event_sample *sample)
273{
274 if (sample->header.type != PERF_RECORD_SAMPLE)
275 return 0;
276
277 if (sample->header.size != sizeof (*sample))
278 return 0;
279
280 return 1;
281}
282
/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.

   Returns a newly allocated vector of blocks; the caller owns it.  */

static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
		     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  Start READ there so
     the loop never consumes more than SIZE bytes in whole-sample steps.  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
	psample = (const struct perf_event_sample *) start;
      else
	{
	  int missing;

	  /* We're to the left of the ring buffer, we will wrap around and
	     reappear at the very right of the ring buffer.  */

	  missing = (begin - start);
	  start = (end - missing);

	  /* If the entire sample is missing, we're done.  */
	  if (missing == sizeof (sample))
	    psample = (const struct perf_event_sample *) start;
	  else
	    {
	      uint8_t *stack;

	      /* The sample wrapped around.  The lower part is at the end and
		 the upper part is at the beginning of the buffer.  */
	      stack = (uint8_t *) &sample;

	      /* Copy the two parts so we have a contiguous sample.  */
	      memcpy (stack, start, missing);
	      memcpy (stack + missing, begin, sizeof (sample) - missing);

	      psample = &sample;
	    }
	}

      /* Stop at the first malformed record; we lost sync with the stream.  */
      if (!perf_event_sample_ok (psample))
	{
	  warning (_("Branch trace may be incomplete."));
	  break;
	}

      /* Drop kernel-to-user branches; see perf_event_skip_bts_record.  */
      if (perf_event_skip_bts_record (&psample->bts))
	continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}
383
/* Check whether the kernel supports BTS.

   We fork a child that stops itself under ptrace and try to open a BTS
   perf event on it; SYS_perf_event_open succeeding is taken as kernel
   support.  Returns non-zero if BTS is supported, zero otherwise.  */

static int
kernel_supports_bts (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      /* Child: arrange to be traced, then stop with SIGTRAP so the parent
	 can probe us.  The child always exits; only the probe result in
	 the parent matters.  */
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
	{
	  warning (_("test bts: cannot PTRACE_TRACEME: %s."),
		   safe_strerror (errno));
	  _exit (1);
	}

      status = raise (SIGTRAP);
      if (status != 0)
	{
	  warning (_("test bts: cannot raise SIGTRAP: %s."),
		   safe_strerror (errno));
	  _exit (1);
	}

      _exit (1);

    default:
      /* Parent: wait for the child to stop at its SIGTRAP.  */
      pid = waitpid (child, &status, 0);
      if (pid != child)
	{
	  warning (_("test bts: bad pid %ld, error: %s."),
		   (long) pid, safe_strerror (errno));
	  return 0;
	}

      if (!WIFSTOPPED (status))
	{
	  warning (_("test bts: expected stop. status: %d."),
		   status);
	  return 0;
	}

      /* Ask for branch-instruction sampling with IP and ADDR, the same
	 configuration linux_enable_bts will use.  */
      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
	close (file);

      /* Clean up the probe child and reap it.  */
      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
	{
	  warning (_("test bts: bad pid %ld, error: %s."),
		   (long) pid, safe_strerror (errno));
	  if (!WIFSIGNALED (status))
	    warning (_("test bts: expected killed. status: %d."),
		     status);
	}

      /* Support is indicated by the perf_event_open call succeeding.  */
      return (file >= 0);
    }
}
466
/* Check whether the kernel supports Intel Processor Trace.

   Same probe strategy as kernel_supports_bts: fork a stopped, traced
   child and try to open an intel_pt perf event on it.  Returns non-zero
   if PT is supported, zero otherwise.  */

static int
kernel_supports_pt (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file, type;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      /* Child: arrange to be traced, then stop with SIGTRAP so the parent
	 can probe us.  */
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
	{
	  warning (_("test pt: cannot PTRACE_TRACEME: %s."),
		   safe_strerror (errno));
	  _exit (1);
	}

      status = raise (SIGTRAP);
      if (status != 0)
	{
	  warning (_("test pt: cannot raise SIGTRAP: %s."),
		   safe_strerror (errno));
	  _exit (1);
	}

      _exit (1);

    default:
      /* Parent: wait for the child to stop at its SIGTRAP.  */
      pid = waitpid (child, &status, 0);
      if (pid != child)
	{
	  warning (_("test pt: bad pid %ld, error: %s."),
		   (long) pid, safe_strerror (errno));
	  return 0;
	}

      if (!WIFSTOPPED (status))
	{
	  warning (_("test pt: expected stop. status: %d."),
		   status);
	  return 0;
	}

      /* The dynamic event type for intel_pt comes from sysfs; without it
	 we cannot even attempt to open the event.  */
      status = perf_event_pt_event_type (&type);
      if (status != 0)
	file = -1;
      else
	{
	  memset (&attr, 0, sizeof (attr));

	  attr.size = sizeof (attr);
	  attr.type = type;
	  attr.exclude_kernel = 1;
	  attr.exclude_hv = 1;
	  attr.exclude_idle = 1;

	  file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
	  if (file >= 0)
	    close (file);
	}

      /* Clean up the probe child and reap it.  */
      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
	{
	  warning (_("test pt: bad pid %ld, error: %s."),
		   (long) pid, safe_strerror (errno));
	  if (!WIFSIGNALED (status))
	    warning (_("test pt: expected killed. status: %d."),
		     status);
	}

      /* Support is indicated by the perf_event_open call succeeding.  */
      return (file >= 0);
    }
}
553
043c3577 554/* Check whether an Intel cpu supports BTS. */
a950d57c
MM
555
556static int
afb778a2 557intel_supports_bts (const struct btrace_cpu *cpu)
a950d57c 558{
afb778a2 559 switch (cpu->family)
5f8e0b8f
MF
560 {
561 case 0x6:
afb778a2 562 switch (cpu->model)
5f8e0b8f
MF
563 {
564 case 0x1a: /* Nehalem */
565 case 0x1f:
566 case 0x1e:
567 case 0x2e:
568 case 0x25: /* Westmere */
569 case 0x2c:
570 case 0x2f:
571 case 0x2a: /* Sandy Bridge */
572 case 0x2d:
573 case 0x3a: /* Ivy Bridge */
574
575 /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
576 "from" information afer an EIST transition, T-states, C1E, or
577 Adaptive Thermal Throttling. */
578 return 0;
579 }
580 }
a950d57c
MM
581
582 return 1;
a950d57c
MM
583}
584
043c3577 585/* Check whether the cpu supports BTS. */
a950d57c
MM
586
587static int
043c3577 588cpu_supports_bts (void)
a950d57c 589{
afb778a2 590 struct btrace_cpu cpu;
a950d57c 591
afb778a2
MM
592 cpu = btrace_this_cpu ();
593 switch (cpu.vendor)
594 {
595 default:
596 /* Don't know about others. Let's assume they do. */
597 return 1;
a950d57c 598
afb778a2
MM
599 case CV_INTEL:
600 return intel_supports_bts (&cpu);
601 }
a950d57c
MM
602}
603
043c3577 604/* Check whether the linux target supports BTS. */
7c97f91e 605
043c3577
MM
606static int
607linux_supports_bts (void)
7c97f91e 608{
a950d57c
MM
609 static int cached;
610
611 if (cached == 0)
612 {
043c3577 613 if (!kernel_supports_bts ())
a950d57c 614 cached = -1;
043c3577 615 else if (!cpu_supports_bts ())
a950d57c
MM
616 cached = -1;
617 else
618 cached = 1;
619 }
620
621 return cached > 0;
7c97f91e
MM
622}
623
bc504a31 624/* Check whether the linux target supports Intel Processor Trace. */
b20a6524
MM
625
626static int
627linux_supports_pt (void)
628{
629 static int cached;
630
631 if (cached == 0)
632 {
633 if (!kernel_supports_pt ())
634 cached = -1;
635 else
636 cached = 1;
637 }
638
639 return cached > 0;
640}
641
7c97f91e
MM
/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  /* Dispatch on the requested trace format; each probe caches its own
     result, so repeated calls are cheap.  */
  switch (format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return linux_supports_bts ();

    case BTRACE_FORMAT_PT:
      return linux_supports_pt ();
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
}
661
/* Enable branch tracing in BTS format for the thread PTID using the
   requested configuration CONF.  Returns the new tracing state, or NULL
   on failure.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  /* Sample every branch instruction.  */
  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  /* Prefer the lwp id; fall back to the pid for the main thread.  */
  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
  if (bts->file < 0)
    goto err_out;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
	   + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
	continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
	continue;

      /* The number of pages we request needs to be a power of two.  The
	 mapping is one configuration page plus the ring buffer itself.  */
      header = ((struct perf_event_mmap_page *)
		mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0));
      if (header != MAP_FAILED)
	break;
    }

  if (pages == 0)
    goto err_file;

  /* Without PERF_ATTR_SIZE_VER5 the data buffer starts right after the
     configuration page.  */
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  /* Newer kernels publish the actual offset/size in the header; use them
     if the header is large enough to contain those fields.  */
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
	{
	  munmap ((void *) header, size + PAGE_SIZE);
	  goto err_file;
	}
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->header = header;
  bts->bts.mem = ((const uint8_t *) header) + data_offset;
  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.last_head = 0ull;

  /* Report back the buffer size we actually obtained.  */
  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo;

 err_file:
  /* We were not able to allocate any buffer.  */
  close (bts->file);

 err_out:
  xfree (tinfo);
  return NULL;
}
782
#if defined (PERF_ATTR_SIZE_VER5)

/* Enable branch tracing in Intel Processor Trace format for the thread
   PTID using the requested configuration CONF.  Returns the new tracing
   state, or NULL on failure.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_pt *pt;
  size_t pages, size;
  int pid, pg, errcode, type;

  if (conf->size == 0)
    return NULL;

  /* The dynamic perf event type for intel_pt comes from sysfs.  */
  errcode = perf_event_pt_event_type (&type);
  if (errcode != 0)
    return NULL;

  /* Prefer the lwp id; fall back to the pid for the main thread.  */
  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = type;

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  pt->file = syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0);
  if (pt->file < 0)
    goto err;

  /* Allocate the configuration page.  It must be writable so we can set
     the aux fields below.  */
  header = ((struct perf_event_mmap_page *)
	    mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
		  pt->file, 0));
  if (header == MAP_FAILED)
    goto err_file;

  /* Place the aux (trace) buffer directly behind the data buffer.  */
  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
	   + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
	continue;

      size = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
	continue;

      header->aux_size = data_size;
      length = size;

      pt->pt.mem = ((const uint8_t *)
		    mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
			  header->aux_offset));
      if (pt->pt.mem != MAP_FAILED)
	break;
    }

  if (pages == 0)
    goto err_conf;

  pt->header = header;
  pt->pt.size = size;
  pt->pt.data_head = &header->aux_head;

  /* Report back the buffer size we actually obtained.  */
  tinfo->conf.pt.size = (unsigned int) size;
  return tinfo;

 err_conf:
  munmap((void *) header, PAGE_SIZE);

 err_file:
  close (pt->file);

 err:
  xfree (tinfo);
  return NULL;
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

/* The kernel headers are too old for Intel Processor Trace support.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  errno = EOPNOTSUPP;
  return NULL;
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */
907
7c97f91e
MM
/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  /* Dispatch on the requested format; each helper returns NULL on
     failure, which we propagate to the caller.  */
  tinfo = NULL;
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      tinfo = linux_enable_bts (ptid, &conf->bts);
      break;

    case BTRACE_FORMAT_PT:
      tinfo = linux_enable_pt (ptid, &conf->pt);
      break;
    }

  return tinfo;
}
932
933/* Disable BTS tracing. */
934
935static enum btrace_error
936linux_disable_bts (struct btrace_tinfo_bts *tinfo)
7c97f91e 937{
aadf7753 938 munmap((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
7c97f91e 939 close (tinfo->file);
7c97f91e 940
969c39fb 941 return BTRACE_ERR_NONE;
7c97f91e
MM
942}
943
bc504a31 944/* Disable Intel Processor Trace tracing. */
b20a6524
MM
945
946static enum btrace_error
947linux_disable_pt (struct btrace_tinfo_pt *tinfo)
948{
949 munmap((void *) tinfo->pt.mem, tinfo->pt.size);
950 munmap((void *) tinfo->header, PAGE_SIZE);
951 close (tinfo->file);
952
953 return BTRACE_ERR_NONE;
954}
955
/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  /* Dispatch on the active format; TINFO is only freed if the
     format-specific teardown succeeded.  */
  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}
983
/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
		struct btrace_target_info *tinfo,
		enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
	{
	  __u64 data_size;

	  /* Determine the number of bytes to read and check for buffer
	     overflows.  */

	  /* Check for data head overflows.  We might be able to recover from
	     those but they are very unlikely and it's not really worth the
	     effort, I think.  */
	  if (data_head < data_tail)
	    return BTRACE_ERR_OVERFLOW;

	  /* If the buffer is smaller than the trace delta, we overflowed.  */
	  data_size = data_head - data_tail;
	  if (buffer_size < data_size)
	    return BTRACE_ERR_OVERFLOW;

	  /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
	  size = (size_t) data_size;
	}
      else
	{
	  /* Read the entire buffer.  */
	  size = buffer_size;

	  /* Adjust the size if the buffer has not overflowed, yet.  */
	  if (data_head < size)
	    size = (size_t) data_head;
	}

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      /* END marks the upper wrap-around point for the backwards read; it
	 only differs from START once the buffer has wrapped at least
	 once.  */
      if (data_head <= buffer_size)
	end = start;
      else
	end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
	 On multi-core systems, the debugger might therefore run while the
	 kernel might be writing the last branch trace records.

	 Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
	break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}
1078
bc504a31 1079/* Fill in the Intel Processor Trace configuration information. */
b20a6524
MM
1080
1081static void
1082linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
1083{
1084 conf->cpu = btrace_this_cpu ();
1085}
1086
bc504a31 1087/* Read branch trace data in Intel Processor Trace format for the thread
b20a6524
MM
1088 given by TINFO into BTRACE using the TYPE reading method. */
1089
1090static enum btrace_error
1091linux_read_pt (struct btrace_data_pt *btrace,
1092 struct btrace_target_info *tinfo,
1093 enum btrace_read_type type)
1094{
1095 struct perf_event_buffer *pt;
1096
1097 pt = &tinfo->variant.pt.pt;
1098
1099 linux_fill_btrace_pt_config (&btrace->config);
1100
1101 switch (type)
1102 {
1103 case BTRACE_READ_DELTA:
1104 /* We don't support delta reads. The data head (i.e. aux_head) wraps
1105 around to stay inside the aux buffer. */
1106 return BTRACE_ERR_NOT_SUPPORTED;
1107
1108 case BTRACE_READ_NEW:
1109 if (!perf_event_new_data (pt))
1110 return BTRACE_ERR_NONE;
1111
1112 /* Fall through. */
1113 case BTRACE_READ_ALL:
1114 perf_event_read_all (pt, &btrace->data, &btrace->size);
1115 return BTRACE_ERR_NONE;
1116 }
1117
1118 internal_error (__FILE__, __LINE__, _("Unkown btrace read type."));
1119}
1120
734b0e4b
MM
1121/* See linux-btrace.h. */
1122
1123enum btrace_error
1124linux_read_btrace (struct btrace_data *btrace,
1125 struct btrace_target_info *tinfo,
1126 enum btrace_read_type type)
1127{
f4abbc16
MM
1128 switch (tinfo->conf.format)
1129 {
1130 case BTRACE_FORMAT_NONE:
1131 return BTRACE_ERR_NOT_SUPPORTED;
1132
1133 case BTRACE_FORMAT_BTS:
1134 /* We read btrace in BTS format. */
1135 btrace->format = BTRACE_FORMAT_BTS;
1136 btrace->variant.bts.blocks = NULL;
1137
1138 return linux_read_bts (&btrace->variant.bts, tinfo, type);
b20a6524
MM
1139
1140 case BTRACE_FORMAT_PT:
bc504a31 1141 /* We read btrace in Intel Processor Trace format. */
b20a6524
MM
1142 btrace->format = BTRACE_FORMAT_PT;
1143 btrace->variant.pt.data = NULL;
1144 btrace->variant.pt.size = 0;
1145
1146 return linux_read_pt (&btrace->variant.pt, tinfo, type);
f4abbc16
MM
1147 }
1148
1149 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1150}
1151
/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  /* Return the configuration as it was actually applied (e.g. the buffer
     sizes recorded by linux_enable_bts/linux_enable_pt).  */
  return &tinfo->conf;
}
1159
7c97f91e
MM
1160#else /* !HAVE_LINUX_PERF_EVENT_H */
1161
/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  /* Without <linux/perf_event.h> and SYS_perf_event_open, branch tracing
     is not supported in any format.  */
  return 0;
}
1169
/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  /* Branch tracing is unsupported without perf events; report failure.  */
  return NULL;
}
1177
/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  /* Branch tracing is unsupported without perf events.  */
  return BTRACE_ERR_NOT_SUPPORTED;
}
1185
/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
		   struct btrace_target_info *tinfo,
		   enum btrace_read_type type)
{
  /* Branch tracing is unsupported without perf events.  */
  return BTRACE_ERR_NOT_SUPPORTED;
}
1195
/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  /* No configuration exists when branch tracing is unsupported.  */
  return NULL;
}
1203
7c97f91e 1204#endif /* !HAVE_LINUX_PERF_EVENT_H */
This page took 0.496092 seconds and 4 git commands to generate.