btrace: prepare for throwing exceptions when enabling btrace
deliverable/binutils-gdb.git: gdb/nat/linux-btrace.c
1 /* Linux-dependent part of branch trace support for GDB, and GDBserver.
2
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "common-defs.h"
23 #include "linux-btrace.h"
24 #include "common-regcache.h"
25 #include "gdb_wait.h"
26 #include "x86-cpuid.h"
27 #include "filestuff.h"
28 #include "common/scoped_fd.h"
29 #include "common/scoped_mmap.h"
30
31 #include <inttypes.h>
32
33 #include <sys/syscall.h>
34
35 #if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
36 #include <unistd.h>
37 #include <sys/mman.h>
38 #include <sys/user.h>
39 #include "nat/gdb_ptrace.h"
40 #include <sys/types.h>
41 #include <signal.h>
42
43 /* A branch trace record in perf_event. */
44 struct perf_event_bts
45 {
46 /* The linear address of the branch source. */
47 uint64_t from;
48
49 /* The linear address of the branch destination. */
50 uint64_t to;
51 };
52
53 /* A perf_event branch trace sample. */
54 struct perf_event_sample
55 {
56 /* The perf_event sample header. */
57 struct perf_event_header header;
58
59 /* The perf_event branch tracing payload. */
60 struct perf_event_bts bts;
61 };
62
63 /* Identify the cpu we're running on. */
64 static struct btrace_cpu
65 btrace_this_cpu (void)
66 {
67 struct btrace_cpu cpu;
68 unsigned int eax, ebx, ecx, edx;
69 int ok;
70
71 memset (&cpu, 0, sizeof (cpu));
72
73 ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
74 if (ok != 0)
75 {
76 if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
77 && edx == signature_INTEL_edx)
78 {
79 unsigned int cpuid, ignore;
80
81 ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
82 if (ok != 0)
83 {
84 cpu.vendor = CV_INTEL;
85
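/* CPUID leaf 1, EAX: the family is in bits 11:8 and the model in bits 7:4.
   For family 6, the extended model in bits 19:16 supplies the model's high
   nibble.  */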
86 cpu.family = (cpuid >> 8) & 0xf;
87 cpu.model = (cpuid >> 4) & 0xf;
88
89 if (cpu.family == 0x6)
90 cpu.model += (cpuid >> 12) & 0xf0;
91 }
92 }
93 }
94
95 return cpu;
96 }
97
98 /* Return non-zero if there is new data in PEV; zero otherwise. */
99
100 static int
101 perf_event_new_data (const struct perf_event_buffer *pev)
102 {
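/* The kernel advances the head as it writes new trace data; LAST_HEAD
   remembers how far we have already read.  */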
103 return *pev->data_head != pev->last_head;
104 }
105
106 /* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
107 to the memory holding the copy.
108 The caller is responsible for freeing the memory. */
109
110 static gdb_byte *
111 perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
112 size_t size)
113 {
114 const gdb_byte *begin, *end, *start, *stop;
115 gdb_byte *buffer;
116 size_t buffer_size;
117 __u64 data_tail;
118
119 if (size == 0)
120 return NULL;
121
122 /* We should never ask for more data than the buffer can hold. */
123 buffer_size = pev->size;
124 gdb_assert (size <= buffer_size);
125
126 /* If we ask for more data than we seem to have, we wrap around and read
127 data from the end of the buffer. This is already handled by the %
128 BUFFER_SIZE operation, below. Here, we just need to make sure that we
129 don't underflow.
130
131 Note that this is perfectly OK for perf event buffers where data_head
132 doesn't grow indefinitely and instead wraps around to remain within the
133 buffer's boundaries. */
134 if (data_head < size)
135 data_head += buffer_size;
136
137 gdb_assert (size <= data_head);
138 data_tail = data_head - size;
139
140 begin = pev->mem;
141 start = begin + data_tail % buffer_size;
142 stop = begin + data_head % buffer_size;
143
144 buffer = (gdb_byte *) xmalloc (size);
145
146 if (start < stop)
147 memcpy (buffer, start, stop - start);
148 else
149 {
150 end = begin + buffer_size;
151
152 memcpy (buffer, start, end - start);
153 memcpy (buffer + (end - start), begin, stop - begin);
154 }
155
156 return buffer;
157 }
158
159 /* Copy the perf event buffer data from PEV.
160 Store a pointer to the copy into DATA and its size in SIZE. */
161
162 static void
163 perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
164 size_t *psize)
165 {
166 size_t size;
167 __u64 data_head;
168
169 data_head = *pev->data_head;
170 size = pev->size;
171
172 *data = perf_event_read (pev, data_head, size);
173 *psize = size;
174
175 pev->last_head = data_head;
176 }
177
178 /* Determine the event type.
179 Returns zero on success and fills in TYPE; returns -1 otherwise. */
180
181 static int
182 perf_event_pt_event_type (int *type)
183 {
184 gdb_file_up file
185 = gdb_fopen_cloexec ("/sys/bus/event_source/devices/intel_pt/type", "r");
186 if (file == nullptr)
187 return -1;
188
189 int found = fscanf (file.get (), "%d", type);
190 if (found == 1)
191 return 0;
192 return -1;
193 }
194
195 /* Try to determine the start address of the Linux kernel. */
196
197 static uint64_t
198 linux_determine_kernel_start (void)
199 {
200 static uint64_t kernel_start;
201 static int cached;
202
203 if (cached != 0)
204 return kernel_start;
205
206 cached = 1;
207
208 gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
209 if (file == NULL)
210 return kernel_start;
211
212 while (!feof (file.get ()))
213 {
214 char buffer[1024], symbol[8], *line;
215 uint64_t addr;
216 int match;
217
218 line = fgets (buffer, sizeof (buffer), file.get ());
219 if (line == NULL)
220 break;
221
222 match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
223 if (match != 2)
224 continue;
225
226 if (strcmp (symbol, "_text") == 0)
227 {
228 kernel_start = addr;
229 break;
230 }
231 }
232
233 return kernel_start;
234 }
235
236 /* Check whether an address is in the kernel. */
237
238 static inline int
239 perf_event_is_kernel_addr (uint64_t addr)
240 {
241 uint64_t kernel_start;
242
243 kernel_start = linux_determine_kernel_start ();
244 if (kernel_start != 0ull)
245 return (addr >= kernel_start);
246
247 /* If we don't know the kernel's start address, let's check the most
248 significant bit. This will work at least for 64-bit kernels. */
249 return ((addr & (1ull << 63)) != 0);
250 }
251
252 /* Check whether a perf event record should be skipped. */
253
254 static inline int
255 perf_event_skip_bts_record (const struct perf_event_bts *bts)
256 {
257 /* The hardware may report branches from kernel into user space. Branches
258 from user into kernel space will be suppressed. We filter the former to
259 provide a consistent branch trace excluding kernel. */
260 return perf_event_is_kernel_addr (bts->from);
261 }
262
263 /* Perform a few consistency checks on a perf event sample record. This is
264 meant to catch cases when we get out of sync with the perf event stream. */
265
266 static inline int
267 perf_event_sample_ok (const struct perf_event_sample *sample)
268 {
269 if (sample->header.type != PERF_RECORD_SAMPLE)
270 return 0;
271
272 if (sample->header.size != sizeof (*sample))
273 return 0;
274
275 return 1;
276 }
277
278 /* Branch trace is collected in a circular buffer [begin; end) as pairs of from
279 and to addresses (plus a header).
280
281 Start points into that buffer at the next sample position.
282 We read the collected samples backwards from start.
283
284 While reading the samples, we convert the information into a list of blocks.
285 For two adjacent samples s1 and s2, we form a block b such that b.begin =
286 s1.to and b.end = s2.from.
287
288 In case the buffer overflows during sampling, one sample may have its lower
289 part at the end and its upper part at the beginning of the buffer. */
290
291 static VEC (btrace_block_s) *
292 perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
293 const uint8_t *end, const uint8_t *start, size_t size)
294 {
295 VEC (btrace_block_s) *btrace = NULL;
296 struct perf_event_sample sample;
297 size_t read = 0;
298 struct btrace_block block = { 0, 0 };
299 struct regcache *regcache;
300
301 gdb_assert (begin <= start);
302 gdb_assert (start <= end);
303
304 /* The first block ends at the current pc. */
305 regcache = get_thread_regcache_for_ptid (tinfo->ptid);
306 block.end = regcache_read_pc (regcache);
307
308 /* The buffer may contain a partial record as its last entry (i.e. when the
309 buffer size is not a multiple of the sample size). */
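/* Starting READ just below one full sample makes the loop below stop before
   it would consume such a partial record.  */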
310 read = sizeof (sample) - 1;
311
312 for (; read < size; read += sizeof (sample))
313 {
314 const struct perf_event_sample *psample;
315
316 /* Find the next perf_event sample in a backwards traversal. */
317 start -= sizeof (sample);
318
319 /* If we're still inside the buffer, we're done. */
320 if (begin <= start)
321 psample = (const struct perf_event_sample *) start;
322 else
323 {
324 int missing;
325
326 /* We're to the left of the ring buffer, so we wrap around and
327 reappear at the very right of the ring buffer. */
328
329 missing = (begin - start);
330 start = (end - missing);
331
332 /* If the entire sample is missing, we're done. */
333 if (missing == sizeof (sample))
334 psample = (const struct perf_event_sample *) start;
335 else
336 {
337 uint8_t *stack;
338
339 /* The sample wrapped around. The lower part is at the end and
340 the upper part is at the beginning of the buffer. */
341 stack = (uint8_t *) &sample;
342
343 /* Copy the two parts so we have a contiguous sample. */
344 memcpy (stack, start, missing);
345 memcpy (stack + missing, begin, sizeof (sample) - missing);
346
347 psample = &sample;
348 }
349 }
350
351 if (!perf_event_sample_ok (psample))
352 {
353 warning (_("Branch trace may be incomplete."));
354 break;
355 }
356
357 if (perf_event_skip_bts_record (&psample->bts))
358 continue;
359
360 /* We found a valid sample, so we can complete the current block. */
361 block.begin = psample->bts.to;
362
363 VEC_safe_push (btrace_block_s, btrace, &block);
364
365 /* Start the next block. */
366 block.end = psample->bts.from;
367 }
368
369 /* Push the last block (i.e. the first one of inferior execution), as well.
370 We don't know where it starts, but we know where it ends. If we're
371 reading delta trace, we can fill in the start address later on.
372 Otherwise we will prune it. */
373 block.begin = 0;
374 VEC_safe_push (btrace_block_s, btrace, &block);
375
376 return btrace;
377 }
378
379 /* Check whether the kernel supports BTS. */
380
381 static int
382 kernel_supports_bts (void)
383 {
384 struct perf_event_attr attr;
385 pid_t child, pid;
386 int status, file;
387
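/* Fork a child that stops itself with SIGTRAP so we have a quiescent process
   to open a perf event on; the child never resumes and is killed below.  */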
388 errno = 0;
389 child = fork ();
390 switch (child)
391 {
392 case -1:
393 warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
394 return 0;
395
396 case 0:
397 status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
398 if (status != 0)
399 {
400 warning (_("test bts: cannot PTRACE_TRACEME: %s."),
401 safe_strerror (errno));
402 _exit (1);
403 }
404
405 status = raise (SIGTRAP);
406 if (status != 0)
407 {
408 warning (_("test bts: cannot raise SIGTRAP: %s."),
409 safe_strerror (errno));
410 _exit (1);
411 }
412
413 _exit (1);
414
415 default:
416 pid = waitpid (child, &status, 0);
417 if (pid != child)
418 {
419 warning (_("test bts: bad pid %ld, error: %s."),
420 (long) pid, safe_strerror (errno));
421 return 0;
422 }
423
424 if (!WIFSTOPPED (status))
425 {
426 warning (_("test bts: expected stop. status: %d."),
427 status);
428 return 0;
429 }
430
431 memset (&attr, 0, sizeof (attr));
432
433 attr.type = PERF_TYPE_HARDWARE;
434 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
435 attr.sample_period = 1;
436 attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
437 attr.exclude_kernel = 1;
438 attr.exclude_hv = 1;
439 attr.exclude_idle = 1;
440
441 file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
442 if (file >= 0)
443 close (file);
444
445 kill (child, SIGKILL);
446 ptrace (PTRACE_KILL, child, NULL, NULL);
447
448 pid = waitpid (child, &status, 0);
449 if (pid != child)
450 {
451 warning (_("test bts: bad pid %ld, error: %s."),
452 (long) pid, safe_strerror (errno));
453 if (!WIFSIGNALED (status))
454 warning (_("test bts: expected killed. status: %d."),
455 status);
456 }
457
458 return (file >= 0);
459 }
460 }
461
462 /* Check whether the kernel supports Intel Processor Trace. */
463
464 static int
465 kernel_supports_pt (void)
466 {
467 struct perf_event_attr attr;
468 pid_t child, pid;
469 int status, file, type;
470
471 errno = 0;
472 child = fork ();
473 switch (child)
474 {
475 case -1:
476 warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
477 return 0;
478
479 case 0:
480 status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
481 if (status != 0)
482 {
483 warning (_("test pt: cannot PTRACE_TRACEME: %s."),
484 safe_strerror (errno));
485 _exit (1);
486 }
487
488 status = raise (SIGTRAP);
489 if (status != 0)
490 {
491 warning (_("test pt: cannot raise SIGTRAP: %s."),
492 safe_strerror (errno));
493 _exit (1);
494 }
495
496 _exit (1);
497
498 default:
499 pid = waitpid (child, &status, 0);
500 if (pid != child)
501 {
502 warning (_("test pt: bad pid %ld, error: %s."),
503 (long) pid, safe_strerror (errno));
504 return 0;
505 }
506
507 if (!WIFSTOPPED (status))
508 {
509 warning (_("test pt: expected stop. status: %d."),
510 status);
511 return 0;
512 }
513
514 status = perf_event_pt_event_type (&type);
515 if (status != 0)
516 file = -1;
517 else
518 {
519 memset (&attr, 0, sizeof (attr));
520
521 attr.size = sizeof (attr);
522 attr.type = type;
523 attr.exclude_kernel = 1;
524 attr.exclude_hv = 1;
525 attr.exclude_idle = 1;
526
527 file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
528 if (file >= 0)
529 close (file);
530 }
531
532 kill (child, SIGKILL);
533 ptrace (PTRACE_KILL, child, NULL, NULL);
534
535 pid = waitpid (child, &status, 0);
536 if (pid != child)
537 {
538 warning (_("test pt: bad pid %ld, error: %s."),
539 (long) pid, safe_strerror (errno));
540 if (!WIFSIGNALED (status))
541 warning (_("test pt: expected killed. status: %d."),
542 status);
543 }
544
545 return (file >= 0);
546 }
547 }
548
549 /* Check whether an Intel cpu supports BTS. */
550
551 static int
552 intel_supports_bts (const struct btrace_cpu *cpu)
553 {
554 switch (cpu->family)
555 {
556 case 0x6:
557 switch (cpu->model)
558 {
559 case 0x1a: /* Nehalem */
560 case 0x1f:
561 case 0x1e:
562 case 0x2e:
563 case 0x25: /* Westmere */
564 case 0x2c:
565 case 0x2f:
566 case 0x2a: /* Sandy Bridge */
567 case 0x2d:
568 case 0x3a: /* Ivy Bridge */
569
570 /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
571 "from" information after an EIST transition, T-states, C1E, or
572 Adaptive Thermal Throttling. */
573 return 0;
574 }
575 }
576
577 return 1;
578 }
579
580 /* Check whether the cpu supports BTS. */
581
582 static int
583 cpu_supports_bts (void)
584 {
585 struct btrace_cpu cpu;
586
587 cpu = btrace_this_cpu ();
588 switch (cpu.vendor)
589 {
590 default:
591 /* Don't know about others. Let's assume they do. */
592 return 1;
593
594 case CV_INTEL:
595 return intel_supports_bts (&cpu);
596 }
597 }
598
599 /* Check whether the linux target supports BTS. */
600
601 static int
602 linux_supports_bts (void)
603 {
604 static int cached;
605
606 if (cached == 0)
607 {
608 if (!kernel_supports_bts ())
609 cached = -1;
610 else if (!cpu_supports_bts ())
611 cached = -1;
612 else
613 cached = 1;
614 }
615
616 return cached > 0;
617 }
618
619 /* Check whether the linux target supports Intel Processor Trace. */
620
621 static int
622 linux_supports_pt (void)
623 {
624 static int cached;
625
626 if (cached == 0)
627 {
628 if (!kernel_supports_pt ())
629 cached = -1;
630 else
631 cached = 1;
632 }
633
634 return cached > 0;
635 }
636
637 /* See linux-btrace.h. */
638
639 int
640 linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
641 {
642 switch (format)
643 {
644 case BTRACE_FORMAT_NONE:
645 return 0;
646
647 case BTRACE_FORMAT_BTS:
648 return linux_supports_bts ();
649
650 case BTRACE_FORMAT_PT:
651 return linux_supports_pt ();
652 }
653
654 internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
655 }
656
657 /* Enable branch tracing in BTS format. */
658
659 static struct btrace_target_info *
660 linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
661 {
662 struct btrace_tinfo_bts *bts;
663 size_t size, pages;
664 __u64 data_offset;
665 int pid, pg;
666
667 gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
668 (XCNEW (btrace_target_info));
669 tinfo->ptid = ptid;
670
671 tinfo->conf.format = BTRACE_FORMAT_BTS;
672 bts = &tinfo->variant.bts;
673
674 bts->attr.size = sizeof (bts->attr);
675 bts->attr.type = PERF_TYPE_HARDWARE;
676 bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
677 bts->attr.sample_period = 1;
678
679 /* We sample the from and to addresses. */
680 bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
681
682 bts->attr.exclude_kernel = 1;
683 bts->attr.exclude_hv = 1;
684 bts->attr.exclude_idle = 1;
685
686 pid = ptid_get_lwp (ptid);
687 if (pid == 0)
688 pid = ptid_get_pid (ptid);
689
690 errno = 0;
691 scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
692 if (fd.get () < 0)
693 return nullptr;
694
695 /* Convert the requested size in bytes to pages (rounding up). */
696 pages = ((size_t) conf->size / PAGE_SIZE
697 + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
698 /* We need at least one page. */
699 if (pages == 0)
700 pages = 1;
701
702 /* The buffer size can be requested in powers of two pages. Adjust PAGES
703 to the next power of two. */
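/* For example, five pages are rounded 5 -> 6 -> 8; an exact power of two is
   left unchanged.  */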
704 for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
705 if ((pages & ((size_t) 1 << pg)) != 0)
706 pages += ((size_t) 1 << pg);
707
708 /* We try to allocate the requested size.
709 If that fails, try to get as much as we can. */
710 scoped_mmap data;
711 for (; pages > 0; pages >>= 1)
712 {
713 size_t length;
714 __u64 data_size;
715
716 data_size = (__u64) pages * PAGE_SIZE;
717
718 /* Don't ask for more than we can represent in the configuration. */
719 if ((__u64) UINT_MAX < data_size)
720 continue;
721
722 size = (size_t) data_size;
723 length = size + PAGE_SIZE;
724
725 /* Check for overflows. */
726 if ((__u64) length != data_size + PAGE_SIZE)
727 continue;
728
729 /* The number of pages we request needs to be a power of two. */
730 data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
731 if (data.get () != MAP_FAILED)
732 break;
733 }
734
735 if (pages == 0)
736 return nullptr;
737
738 struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
739 data.get ();
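/* On kernels too old to report the data area's offset in the mmap header,
   the data area starts right after the single header page.  */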
740 data_offset = PAGE_SIZE;
741
742 #if defined (PERF_ATTR_SIZE_VER5)
743 if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
744 {
745 __u64 data_size;
746
747 data_offset = header->data_offset;
748 data_size = header->data_size;
749
750 size = (unsigned int) data_size;
751
752 /* Check for overflows. */
753 if ((__u64) size != data_size)
754 return nullptr;
755 }
756 #endif /* defined (PERF_ATTR_SIZE_VER5) */
757
758 bts->bts.size = size;
759 bts->bts.data_head = &header->data_head;
760 bts->bts.mem = (const uint8_t *) data.get () + data_offset;
761 bts->bts.last_head = 0ull;
762 bts->header = header;
763 bts->file = fd.release ();
764
765 data.release ();
766
767 tinfo->conf.bts.size = (unsigned int) size;
768 return tinfo.release ();
769 }
770
771 #if defined (PERF_ATTR_SIZE_VER5)
772
773 /* Enable branch tracing in Intel Processor Trace format. */
774
775 static struct btrace_target_info *
776 linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
777 {
778 struct btrace_tinfo_pt *pt;
779 size_t pages;
780 int pid, pg, errcode, type;
781
782 if (conf->size == 0)
783 return NULL;
784
785 errcode = perf_event_pt_event_type (&type);
786 if (errcode != 0)
787 return NULL;
788
789 pid = ptid_get_lwp (ptid);
790 if (pid == 0)
791 pid = ptid_get_pid (ptid);
792
793 gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
794 (XCNEW (btrace_target_info));
795 tinfo->ptid = ptid;
796
797 tinfo->conf.format = BTRACE_FORMAT_PT;
798 pt = &tinfo->variant.pt;
799
800 pt->attr.size = sizeof (pt->attr);
801 pt->attr.type = type;
802
803 pt->attr.exclude_kernel = 1;
804 pt->attr.exclude_hv = 1;
805 pt->attr.exclude_idle = 1;
806
807 errno = 0;
808 scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
809 if (fd.get () < 0)
810 return nullptr;
811
812 /* Allocate the configuration page. */
813 scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
814 fd.get (), 0);
815 if (data.get () == MAP_FAILED)
816 return nullptr;
817
818 struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
819 data.get ();
820
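/* Place the AUX area right after the data area.  The kernel requires
   aux_offset and aux_size to be set before the AUX area is mapped below.  */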
821 header->aux_offset = header->data_offset + header->data_size;
822
823 /* Convert the requested size in bytes to pages (rounding up). */
824 pages = ((size_t) conf->size / PAGE_SIZE
825 + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
826 /* We need at least one page. */
827 if (pages == 0)
828 pages = 1;
829
830 /* The buffer size can be requested in powers of two pages. Adjust PAGES
831 to the next power of two. */
832 for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
833 if ((pages & ((size_t) 1 << pg)) != 0)
834 pages += ((size_t) 1 << pg);
835
836 /* We try to allocate the requested size.
837 If that fails, try to get as much as we can. */
838 scoped_mmap aux;
839 for (; pages > 0; pages >>= 1)
840 {
841 size_t length;
842 __u64 data_size;
843
844 data_size = (__u64) pages * PAGE_SIZE;
845
846 /* Don't ask for more than we can represent in the configuration. */
847 if ((__u64) UINT_MAX < data_size)
848 continue;
849
850 length = (size_t) data_size;
851
852 /* Check for overflows. */
853 if ((__u64) length != data_size)
854 continue;
855
856 header->aux_size = data_size;
857
858 aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
859 header->aux_offset);
860 if (aux.get () != MAP_FAILED)
861 break;
862 }
863
864 if (pages == 0)
865 return nullptr;
866
867 pt->pt.size = aux.size ();
868 pt->pt.mem = (const uint8_t *) aux.release ();
869 pt->pt.data_head = &header->aux_head;
870 pt->header = header;
871 pt->file = fd.release ();
872
873 data.release ();
874
875 tinfo->conf.pt.size = (unsigned int) pt->pt.size;
876 return tinfo.release ();
877 }
878
879 #else /* !defined (PERF_ATTR_SIZE_VER5) */
880
881 static struct btrace_target_info *
882 linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
883 {
884 errno = EOPNOTSUPP;
885 return NULL;
886 }
887
888 #endif /* !defined (PERF_ATTR_SIZE_VER5) */
889
890 /* See linux-btrace.h. */
891
892 struct btrace_target_info *
893 linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
894 {
895 struct btrace_target_info *tinfo;
896
897 tinfo = NULL;
898 switch (conf->format)
899 {
900 case BTRACE_FORMAT_NONE:
901 break;
902
903 case BTRACE_FORMAT_BTS:
904 tinfo = linux_enable_bts (ptid, &conf->bts);
905 break;
906
907 case BTRACE_FORMAT_PT:
908 tinfo = linux_enable_pt (ptid, &conf->pt);
909 break;
910 }
911
912 return tinfo;
913 }
914
915 /* Disable BTS tracing. */
916
917 static enum btrace_error
918 linux_disable_bts (struct btrace_tinfo_bts *tinfo)
919 {
920 munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
921 close (tinfo->file);
922
923 return BTRACE_ERR_NONE;
924 }
925
926 /* Disable Intel Processor Trace tracing. */
927
928 static enum btrace_error
929 linux_disable_pt (struct btrace_tinfo_pt *tinfo)
930 {
931 munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
932 munmap ((void *) tinfo->header, PAGE_SIZE);
933 close (tinfo->file);
934
935 return BTRACE_ERR_NONE;
936 }
937
938 /* See linux-btrace.h. */
939
940 enum btrace_error
941 linux_disable_btrace (struct btrace_target_info *tinfo)
942 {
943 enum btrace_error errcode;
944
945 errcode = BTRACE_ERR_NOT_SUPPORTED;
946 switch (tinfo->conf.format)
947 {
948 case BTRACE_FORMAT_NONE:
949 break;
950
951 case BTRACE_FORMAT_BTS:
952 errcode = linux_disable_bts (&tinfo->variant.bts);
953 break;
954
955 case BTRACE_FORMAT_PT:
956 errcode = linux_disable_pt (&tinfo->variant.pt);
957 break;
958 }
959
960 if (errcode == BTRACE_ERR_NONE)
961 xfree (tinfo);
962
963 return errcode;
964 }
965
966 /* Read branch trace data in BTS format for the thread given by TINFO into
967 BTRACE using the TYPE reading method. */
968
969 static enum btrace_error
970 linux_read_bts (struct btrace_data_bts *btrace,
971 struct btrace_target_info *tinfo,
972 enum btrace_read_type type)
973 {
974 struct perf_event_buffer *pevent;
975 const uint8_t *begin, *end, *start;
976 size_t buffer_size, size;
977 __u64 data_head, data_tail;
978 unsigned int retries = 5;
979
980 pevent = &tinfo->variant.bts.bts;
981
982 /* For delta reads, we return at least the partial last block containing
983 the current PC. */
984 if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
985 return BTRACE_ERR_NONE;
986
987 buffer_size = pevent->size;
988 data_tail = pevent->last_head;
989
990 /* We may need to retry reading the trace. See below. */
991 while (retries--)
992 {
993 data_head = *pevent->data_head;
994
995 /* Delete any leftover trace from the previous iteration. */
996 VEC_free (btrace_block_s, btrace->blocks);
997
998 if (type == BTRACE_READ_DELTA)
999 {
1000 __u64 data_size;
1001
1002 /* Determine the number of bytes to read and check for buffer
1003 overflows. */
1004
1005 /* Check for data head overflows. We might be able to recover from
1006 those but they are very unlikely and it's not really worth the
1007 effort, I think. */
1008 if (data_head < data_tail)
1009 return BTRACE_ERR_OVERFLOW;
1010
1011 /* If the buffer is smaller than the trace delta, we overflowed. */
1012 data_size = data_head - data_tail;
1013 if (buffer_size < data_size)
1014 return BTRACE_ERR_OVERFLOW;
1015
1016 /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t. */
1017 size = (size_t) data_size;
1018 }
1019 else
1020 {
1021 /* Read the entire buffer. */
1022 size = buffer_size;
1023
1024 /* Adjust the size if the buffer has not overflowed, yet. */
1025 if (data_head < size)
1026 size = (size_t) data_head;
1027 }
1028
1029 /* Data_head keeps growing; the buffer itself is circular. */
1030 begin = pevent->mem;
1031 start = begin + data_head % buffer_size;
1032
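/* If the data head has not wrapped yet, only [BEGIN; START) holds trace;
   otherwise the whole circular buffer is valid and END marks its upper
   bound.  */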
1033 if (data_head <= buffer_size)
1034 end = start;
1035 else
1036 end = begin + pevent->size;
1037
1038 btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);
1039
1040 /* The stopping thread notifies its ptracer before it is scheduled out.
1041 On multi-core systems, the debugger might therefore run while the
1042 kernel is still writing the last branch trace records.
1043
1044 Let's check whether the data head moved while we read the trace. */
1045 if (data_head == *pevent->data_head)
1046 break;
1047 }
1048
1049 pevent->last_head = data_head;
1050
1051 /* Prune the incomplete last block (i.e. the first one of inferior execution)
1052 if we're not doing a delta read. There is no way of filling in its zeroed
1053 BEGIN element. */
1054 if (!VEC_empty (btrace_block_s, btrace->blocks)
1055 && type != BTRACE_READ_DELTA)
1056 VEC_pop (btrace_block_s, btrace->blocks);
1057
1058 return BTRACE_ERR_NONE;
1059 }
1060
1061 /* Fill in the Intel Processor Trace configuration information. */
1062
1063 static void
1064 linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
1065 {
1066 conf->cpu = btrace_this_cpu ();
1067 }
1068
1069 /* Read branch trace data in Intel Processor Trace format for the thread
1070 given by TINFO into BTRACE using the TYPE reading method. */
1071
1072 static enum btrace_error
1073 linux_read_pt (struct btrace_data_pt *btrace,
1074 struct btrace_target_info *tinfo,
1075 enum btrace_read_type type)
1076 {
1077 struct perf_event_buffer *pt;
1078
1079 pt = &tinfo->variant.pt.pt;
1080
1081 linux_fill_btrace_pt_config (&btrace->config);
1082
1083 switch (type)
1084 {
1085 case BTRACE_READ_DELTA:
1086 /* We don't support delta reads. The data head (i.e. aux_head) wraps
1087 around to stay inside the aux buffer. */
1088 return BTRACE_ERR_NOT_SUPPORTED;
1089
1090 case BTRACE_READ_NEW:
1091 if (!perf_event_new_data (pt))
1092 return BTRACE_ERR_NONE;
1093
1094 /* Fall through. */
1095 case BTRACE_READ_ALL:
1096 perf_event_read_all (pt, &btrace->data, &btrace->size);
1097 return BTRACE_ERR_NONE;
1098 }
1099
1100 internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
1101 }
1102
1103 /* See linux-btrace.h. */
1104
1105 enum btrace_error
1106 linux_read_btrace (struct btrace_data *btrace,
1107 struct btrace_target_info *tinfo,
1108 enum btrace_read_type type)
1109 {
1110 switch (tinfo->conf.format)
1111 {
1112 case BTRACE_FORMAT_NONE:
1113 return BTRACE_ERR_NOT_SUPPORTED;
1114
1115 case BTRACE_FORMAT_BTS:
1116 /* We read btrace in BTS format. */
1117 btrace->format = BTRACE_FORMAT_BTS;
1118 btrace->variant.bts.blocks = NULL;
1119
1120 return linux_read_bts (&btrace->variant.bts, tinfo, type);
1121
1122 case BTRACE_FORMAT_PT:
1123 /* We read btrace in Intel Processor Trace format. */
1124 btrace->format = BTRACE_FORMAT_PT;
1125 btrace->variant.pt.data = NULL;
1126 btrace->variant.pt.size = 0;
1127
1128 return linux_read_pt (&btrace->variant.pt, tinfo, type);
1129 }
1130
1131 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1132 }
1133
1134 /* See linux-btrace.h. */
1135
1136 const struct btrace_config *
1137 linux_btrace_conf (const struct btrace_target_info *tinfo)
1138 {
1139 return &tinfo->conf;
1140 }
1141
1142 #else /* !HAVE_LINUX_PERF_EVENT_H */
1143
1144 /* See linux-btrace.h. */
1145
1146 int
1147 linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
1148 {
1149 return 0;
1150 }
1151
1152 /* See linux-btrace.h. */
1153
1154 struct btrace_target_info *
1155 linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
1156 {
1157 return NULL;
1158 }
1159
1160 /* See linux-btrace.h. */
1161
1162 enum btrace_error
1163 linux_disable_btrace (struct btrace_target_info *tinfo)
1164 {
1165 return BTRACE_ERR_NOT_SUPPORTED;
1166 }
1167
1168 /* See linux-btrace.h. */
1169
1170 enum btrace_error
1171 linux_read_btrace (struct btrace_data *btrace,
1172 struct btrace_target_info *tinfo,
1173 enum btrace_read_type type)
1174 {
1175 return BTRACE_ERR_NOT_SUPPORTED;
1176 }
1177
1178 /* See linux-btrace.h. */
1179
1180 const struct btrace_config *
1181 linux_btrace_conf (const struct btrace_target_info *tinfo)
1182 {
1183 return NULL;
1184 }
1185
1186 #endif /* !HAVE_LINUX_PERF_EVENT_H */