btrace, gdbserver: use exceptions to convey btrace enable/disable errors
gdb/nat/linux-btrace.c
1 /* Linux-dependent part of branch trace support for GDB, and GDBserver.
2
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "common-defs.h"
23 #include "linux-btrace.h"
24 #include "common-regcache.h"
25 #include "gdb_wait.h"
26 #include "x86-cpuid.h"
27 #include "filestuff.h"
28 #include "common/scoped_fd.h"
29 #include "common/scoped_mmap.h"
30
31 #include <inttypes.h>
32
33 #include <sys/syscall.h>
34
35 #if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
36 #include <unistd.h>
37 #include <sys/mman.h>
38 #include <sys/user.h>
39 #include "nat/gdb_ptrace.h"
40 #include <sys/types.h>
41 #include <signal.h>
42
43 /* A branch trace record in perf_event. */
44 struct perf_event_bts
45 {
46 /* The linear address of the branch source. */
47 uint64_t from;
48
49 /* The linear address of the branch destination. */
50 uint64_t to;
51 };
52
53 /* A perf_event branch trace sample. */
54 struct perf_event_sample
55 {
56 /* The perf_event sample header. */
57 struct perf_event_header header;
58
59 /* The perf_event branch tracing payload. */
60 struct perf_event_bts bts;
61 };
62
63 /* Identify the cpu we're running on. */
64 static struct btrace_cpu
65 btrace_this_cpu (void)
66 {
67 struct btrace_cpu cpu;
68 unsigned int eax, ebx, ecx, edx;
69 int ok;
70
71 memset (&cpu, 0, sizeof (cpu));
72
73 ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
74 if (ok != 0)
75 {
76 if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
77 && edx == signature_INTEL_edx)
78 {
79 unsigned int cpuid, ignore;
80
81 ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
82 if (ok != 0)
83 {
84 cpu.vendor = CV_INTEL;
85
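/* CPUID leaf 1 returns the family in EAX bits 11:8 and the model in
   bits 7:4; for family 6 parts, the extended model in bits 19:16 is
   folded into the high nibble of the model below.  */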
86 cpu.family = (cpuid >> 8) & 0xf;
87 cpu.model = (cpuid >> 4) & 0xf;
88
89 if (cpu.family == 0x6)
90 cpu.model += (cpuid >> 12) & 0xf0;
91 }
92 }
93 }
94
95 return cpu;
96 }
97
98 /* Return non-zero if there is new data in PEV; zero otherwise. */
99
100 static int
101 perf_event_new_data (const struct perf_event_buffer *pev)
102 {
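/* DATA_HEAD is advanced by the kernel as it writes new records into the
   ring buffer; LAST_HEAD remembers how far our previous read got.  */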
103 return *pev->data_head != pev->last_head;
104 }
105
106 /* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
107 to the memory holding the copy.
108 The caller is responsible for freeing the memory. */
109
110 static gdb_byte *
111 perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
112 size_t size)
113 {
114 const gdb_byte *begin, *end, *start, *stop;
115 gdb_byte *buffer;
116 size_t buffer_size;
117 __u64 data_tail;
118
119 if (size == 0)
120 return NULL;
121
122 /* We should never ask for more data than the buffer can hold. */
123 buffer_size = pev->size;
124 gdb_assert (size <= buffer_size);
125
126 /* If we ask for more data than we seem to have, we wrap around and read
127 data from the end of the buffer. This is already handled by the %
128 BUFFER_SIZE operation, below. Here, we just need to make sure that we
129 don't underflow.
130
131 Note that this is perfectly OK for perf event buffers where data_head
132 doesn't grow indefinitely and instead wraps around to remain within the
133 buffer's boundaries. */
134 if (data_head < size)
135 data_head += buffer_size;
136
137 gdb_assert (size <= data_head);
138 data_tail = data_head - size;
139
140 begin = pev->mem;
141 start = begin + data_tail % buffer_size;
142 stop = begin + data_head % buffer_size;
143
144 buffer = (gdb_byte *) xmalloc (size);
145
146 if (start < stop)
147 memcpy (buffer, start, stop - start);
148 else
149 {
150 end = begin + buffer_size;
151
152 memcpy (buffer, start, end - start);
153 memcpy (buffer + (end - start), begin, stop - begin);
154 }
155
156 return buffer;
157 }
158
159 /* Copy the perf event buffer data from PEV.
160 Store a pointer to the copy into DATA and its size in PSIZE. */
161
162 static void
163 perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
164 size_t *psize)
165 {
166 size_t size;
167 __u64 data_head;
168
169 data_head = *pev->data_head;
170 size = pev->size;
171
172 *data = perf_event_read (pev, data_head, size);
173 *psize = size;
174
175 pev->last_head = data_head;
176 }
177
178 /* Determine the event type.
179 Returns zero on success and fills in TYPE; returns -1 otherwise. */
180
181 static int
182 perf_event_pt_event_type (int *type)
183 {
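  /* intel_pt is a dynamic PMU; the kernel exports its perf event type
     number via sysfs.  */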
184 gdb_file_up file
185 = gdb_fopen_cloexec ("/sys/bus/event_source/devices/intel_pt/type", "r");
186 if (file == nullptr)
187 return -1;
188
189 int found = fscanf (file.get (), "%d", type);
190 if (found == 1)
191 return 0;
192 return -1;
193 }
194
195 /* Try to determine the start address of the Linux kernel. */
196
197 static uint64_t
198 linux_determine_kernel_start (void)
199 {
200 static uint64_t kernel_start;
201 static int cached;
202
203 if (cached != 0)
204 return kernel_start;
205
206 cached = 1;
207
208 gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
209 if (file == NULL)
210 return kernel_start;
211
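  /* The "_text" symbol marks the start of the kernel's text section; scan
     kallsyms line by line until we find it.  */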
212 while (!feof (file.get ()))
213 {
214 char buffer[1024], symbol[8], *line;
215 uint64_t addr;
216 int match;
217
218 line = fgets (buffer, sizeof (buffer), file.get ());
219 if (line == NULL)
220 break;
221
222 match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
223 if (match != 2)
224 continue;
225
226 if (strcmp (symbol, "_text") == 0)
227 {
228 kernel_start = addr;
229 break;
230 }
231 }
232
233 return kernel_start;
234 }
235
236 /* Check whether an address is in the kernel. */
237
238 static inline int
239 perf_event_is_kernel_addr (uint64_t addr)
240 {
241 uint64_t kernel_start;
242
243 kernel_start = linux_determine_kernel_start ();
244 if (kernel_start != 0ull)
245 return (addr >= kernel_start);
246
247 /* If we don't know the kernel's start address, let's check the most
248 significant bit. This will work at least for 64-bit kernels. */
249 return ((addr & (1ull << 63)) != 0);
250 }
251
252 /* Check whether a perf event record should be skipped. */
253
254 static inline int
255 perf_event_skip_bts_record (const struct perf_event_bts *bts)
256 {
257 /* The hardware may report branches from kernel into user space. Branches
258 from user into kernel space will be suppressed. We filter the former to
259 provide a consistent branch trace excluding the kernel. */
260 return perf_event_is_kernel_addr (bts->from);
261 }
262
263 /* Perform a few consistency checks on a perf event sample record. This is
264 meant to catch cases when we get out of sync with the perf event stream. */
265
266 static inline int
267 perf_event_sample_ok (const struct perf_event_sample *sample)
268 {
269 if (sample->header.type != PERF_RECORD_SAMPLE)
270 return 0;
271
272 if (sample->header.size != sizeof (*sample))
273 return 0;
274
275 return 1;
276 }
277
278 /* Branch trace is collected in a circular buffer [begin; end) as pairs of from
279 and to addresses (plus a header).
280
281 Start points into that buffer at the next sample position.
282 We read the collected samples backwards from start.
283
284 While reading the samples, we convert the information into a list of blocks.
285 For two adjacent samples s1 and s2, we form a block b such that b.begin =
286 s1.to and b.end = s2.from.
287
288 In case the buffer overflows during sampling, one sample may have its lower
289 part at the end and its upper part at the beginning of the buffer. */
290
291 static VEC (btrace_block_s) *
292 perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
293 const uint8_t *end, const uint8_t *start, size_t size)
294 {
295 VEC (btrace_block_s) *btrace = NULL;
296 struct perf_event_sample sample;
297 size_t read = 0;
298 struct btrace_block block = { 0, 0 };
299 struct regcache *regcache;
300
301 gdb_assert (begin <= start);
302 gdb_assert (start <= end);
303
304 /* The first block ends at the current pc. */
305 regcache = get_thread_regcache_for_ptid (tinfo->ptid);
306 block.end = regcache_read_pc (regcache);
307
308 /* The buffer may contain a partial record as its last entry (i.e. when the
309 buffer size is not a multiple of the sample size). */
310 read = sizeof (sample) - 1;
311
312 for (; read < size; read += sizeof (sample))
313 {
314 const struct perf_event_sample *psample;
315
316 /* Find the next perf_event sample in a backwards traversal. */
317 start -= sizeof (sample);
318
319 /* If we're still inside the buffer, we're done. */
320 if (begin <= start)
321 psample = (const struct perf_event_sample *) start;
322 else
323 {
324 int missing;
325
326 /* We're to the left of the ring buffer; we will wrap around and
327 reappear at the very right of the ring buffer. */
328
329 missing = (begin - start);
330 start = (end - missing);
331
332 /* If the entire sample is missing, we're done. */
333 if (missing == sizeof (sample))
334 psample = (const struct perf_event_sample *) start;
335 else
336 {
337 uint8_t *stack;
338
339 /* The sample wrapped around. The lower part is at the end and
340 the upper part is at the beginning of the buffer. */
341 stack = (uint8_t *) &sample;
342
343 /* Copy the two parts so we have a contiguous sample. */
344 memcpy (stack, start, missing);
345 memcpy (stack + missing, begin, sizeof (sample) - missing);
346
347 psample = &sample;
348 }
349 }
350
351 if (!perf_event_sample_ok (psample))
352 {
353 warning (_("Branch trace may be incomplete."));
354 break;
355 }
356
357 if (perf_event_skip_bts_record (&psample->bts))
358 continue;
359
360 /* We found a valid sample, so we can complete the current block. */
361 block.begin = psample->bts.to;
362
363 VEC_safe_push (btrace_block_s, btrace, &block);
364
365 /* Start the next block. */
366 block.end = psample->bts.from;
367 }
368
369 /* Push the last block (i.e. the first one of inferior execution), as well.
370 We don't know where it starts, but we know where it ends. If we're
371 reading delta trace, we can fill in the start address later on.
372 Otherwise we will prune it. */
373 block.begin = 0;
374 VEC_safe_push (btrace_block_s, btrace, &block);
375
376 return btrace;
377 }
378
379 /* Check whether the kernel supports BTS. */
380
381 static int
382 kernel_supports_bts (void)
383 {
384 struct perf_event_attr attr;
385 pid_t child, pid;
386 int status, file;
387
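  /* Fork a child that ptrace-stops itself so we have a stopped thread to
     open the perf event on; if perf_event_open accepts the BTS-style
     attributes below, the kernel supports BTS.  */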
388 errno = 0;
389 child = fork ();
390 switch (child)
391 {
392 case -1:
393 warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
394 return 0;
395
396 case 0:
397 status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
398 if (status != 0)
399 {
400 warning (_("test bts: cannot PTRACE_TRACEME: %s."),
401 safe_strerror (errno));
402 _exit (1);
403 }
404
405 status = raise (SIGTRAP);
406 if (status != 0)
407 {
408 warning (_("test bts: cannot raise SIGTRAP: %s."),
409 safe_strerror (errno));
410 _exit (1);
411 }
412
413 _exit (1);
414
415 default:
416 pid = waitpid (child, &status, 0);
417 if (pid != child)
418 {
419 warning (_("test bts: bad pid %ld, error: %s."),
420 (long) pid, safe_strerror (errno));
421 return 0;
422 }
423
424 if (!WIFSTOPPED (status))
425 {
426 warning (_("test bts: expected stop. status: %d."),
427 status);
428 return 0;
429 }
430
431 memset (&attr, 0, sizeof (attr));
432
433 attr.type = PERF_TYPE_HARDWARE;
434 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
435 attr.sample_period = 1;
436 attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
437 attr.exclude_kernel = 1;
438 attr.exclude_hv = 1;
439 attr.exclude_idle = 1;
440
441 file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
442 if (file >= 0)
443 close (file);
444
445 kill (child, SIGKILL);
446 ptrace (PTRACE_KILL, child, NULL, NULL);
447
448 pid = waitpid (child, &status, 0);
449 if (pid != child)
450 {
451 warning (_("test bts: bad pid %ld, error: %s."),
452 (long) pid, safe_strerror (errno));
453 if (!WIFSIGNALED (status))
454 warning (_("test bts: expected killed. status: %d."),
455 status);
456 }
457
458 return (file >= 0);
459 }
460 }
461
462 /* Check whether the kernel supports Intel Processor Trace. */
463
464 static int
465 kernel_supports_pt (void)
466 {
467 struct perf_event_attr attr;
468 pid_t child, pid;
469 int status, file, type;
470
471 errno = 0;
472 child = fork ();
473 switch (child)
474 {
475 case -1:
476 warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
477 return 0;
478
479 case 0:
480 status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
481 if (status != 0)
482 {
483 warning (_("test pt: cannot PTRACE_TRACEME: %s."),
484 safe_strerror (errno));
485 _exit (1);
486 }
487
488 status = raise (SIGTRAP);
489 if (status != 0)
490 {
491 warning (_("test pt: cannot raise SIGTRAP: %s."),
492 safe_strerror (errno));
493 _exit (1);
494 }
495
496 _exit (1);
497
498 default:
499 pid = waitpid (child, &status, 0);
500 if (pid != child)
501 {
502 warning (_("test pt: bad pid %ld, error: %s."),
503 (long) pid, safe_strerror (errno));
504 return 0;
505 }
506
507 if (!WIFSTOPPED (status))
508 {
509 warning (_("test pt: expected stop. status: %d."),
510 status);
511 return 0;
512 }
513
514 status = perf_event_pt_event_type (&type);
515 if (status != 0)
516 file = -1;
517 else
518 {
519 memset (&attr, 0, sizeof (attr));
520
521 attr.size = sizeof (attr);
522 attr.type = type;
523 attr.exclude_kernel = 1;
524 attr.exclude_hv = 1;
525 attr.exclude_idle = 1;
526
527 file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
528 if (file >= 0)
529 close (file);
530 }
531
532 kill (child, SIGKILL);
533 ptrace (PTRACE_KILL, child, NULL, NULL);
534
535 pid = waitpid (child, &status, 0);
536 if (pid != child)
537 {
538 warning (_("test pt: bad pid %ld, error: %s."),
539 (long) pid, safe_strerror (errno));
540 if (!WIFSIGNALED (status))
541 warning (_("test pt: expected killed. status: %d."),
542 status);
543 }
544
545 return (file >= 0);
546 }
547 }
548
549 /* Check whether an Intel cpu supports BTS. */
550
551 static int
552 intel_supports_bts (const struct btrace_cpu *cpu)
553 {
554 switch (cpu->family)
555 {
556 case 0x6:
557 switch (cpu->model)
558 {
559 case 0x1a: /* Nehalem */
560 case 0x1f:
561 case 0x1e:
562 case 0x2e:
563 case 0x25: /* Westmere */
564 case 0x2c:
565 case 0x2f:
566 case 0x2a: /* Sandy Bridge */
567 case 0x2d:
568 case 0x3a: /* Ivy Bridge */
569
570 /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
571 "from" information afer an EIST transition, T-states, C1E, or
572 Adaptive Thermal Throttling. */
573 return 0;
574 }
575 }
576
577 return 1;
578 }
579
580 /* Check whether the cpu supports BTS. */
581
582 static int
583 cpu_supports_bts (void)
584 {
585 struct btrace_cpu cpu;
586
587 cpu = btrace_this_cpu ();
588 switch (cpu.vendor)
589 {
590 default:
591 /* Don't know about others. Let's assume they do. */
592 return 1;
593
594 case CV_INTEL:
595 return intel_supports_bts (&cpu);
596 }
597 }
598
599 /* Check whether the linux target supports BTS. */
600
601 static int
602 linux_supports_bts (void)
603 {
604 static int cached;
605
606 if (cached == 0)
607 {
608 if (!kernel_supports_bts ())
609 cached = -1;
610 else if (!cpu_supports_bts ())
611 cached = -1;
612 else
613 cached = 1;
614 }
615
616 return cached > 0;
617 }
618
619 /* Check whether the linux target supports Intel Processor Trace. */
620
621 static int
622 linux_supports_pt (void)
623 {
624 static int cached;
625
626 if (cached == 0)
627 {
628 if (!kernel_supports_pt ())
629 cached = -1;
630 else
631 cached = 1;
632 }
633
634 return cached > 0;
635 }
636
637 /* See linux-btrace.h. */
638
639 int
640 linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
641 {
642 switch (format)
643 {
644 case BTRACE_FORMAT_NONE:
645 return 0;
646
647 case BTRACE_FORMAT_BTS:
648 return linux_supports_bts ();
649
650 case BTRACE_FORMAT_PT:
651 return linux_supports_pt ();
652 }
653
654 internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
655 }
656
657 /* Enable branch tracing in BTS format. */
658
659 static struct btrace_target_info *
660 linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
661 {
662 struct btrace_tinfo_bts *bts;
663 size_t size, pages;
664 __u64 data_offset;
665 int pid, pg;
666
667 gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
668 (XCNEW (btrace_target_info));
669 tinfo->ptid = ptid;
670
671 tinfo->conf.format = BTRACE_FORMAT_BTS;
672 bts = &tinfo->variant.bts;
673
674 bts->attr.size = sizeof (bts->attr);
675 bts->attr.type = PERF_TYPE_HARDWARE;
676 bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
677 bts->attr.sample_period = 1;
678
679 /* We sample from and to address. */
680 bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
681
682 bts->attr.exclude_kernel = 1;
683 bts->attr.exclude_hv = 1;
684 bts->attr.exclude_idle = 1;
685
686 pid = ptid_get_lwp (ptid);
687 if (pid == 0)
688 pid = ptid_get_pid (ptid);
689
690 errno = 0;
691 scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
692 if (fd.get () < 0)
693 return nullptr;
694
695 /* Convert the requested size in bytes to pages (rounding up). */
696 pages = ((size_t) conf->size / PAGE_SIZE
697 + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
698 /* We need at least one page. */
699 if (pages == 0)
700 pages = 1;
701
702 /* The buffer size can be requested in powers of two pages. Adjust PAGES
703 to the next power of two. */
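  /* For example, a request for 5 pages becomes 6 after the first addition
     and 8 after the second: each addition clears the lowest set bit and
     carries upward until a single bit remains.  */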
704 for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
705 if ((pages & ((size_t) 1 << pg)) != 0)
706 pages += ((size_t) 1 << pg);
707
708 /* We try to allocate the requested size.
709 If that fails, try to get as much as we can. */
710 scoped_mmap data;
711 for (; pages > 0; pages >>= 1)
712 {
713 size_t length;
714 __u64 data_size;
715
716 data_size = (__u64) pages * PAGE_SIZE;
717
718 /* Don't ask for more than we can represent in the configuration. */
719 if ((__u64) UINT_MAX < data_size)
720 continue;
721
722 size = (size_t) data_size;
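  /* The extra page holds the perf_event_mmap_page header; the ring buffer
     data follows it in the same mapping.  */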
723 length = size + PAGE_SIZE;
724
725 /* Check for overflows. */
726 if ((__u64) length != data_size + PAGE_SIZE)
727 continue;
728
729 /* The number of pages we request needs to be a power of two. */
730 data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
731 if (data.get () != MAP_FAILED)
732 break;
733 }
734
735 if (pages == 0)
736 return nullptr;
737
738 struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
739 data.get ();
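  /* Unless the kernel tells us otherwise (see the PERF_ATTR_SIZE_VER5 block
     below), assume the data area starts right after the single header
     page.  */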
740 data_offset = PAGE_SIZE;
741
742 #if defined (PERF_ATTR_SIZE_VER5)
743 if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
744 {
745 __u64 data_size;
746
747 data_offset = header->data_offset;
748 data_size = header->data_size;
749
750 size = (unsigned int) data_size;
751
752 /* Check for overflows. */
753 if ((__u64) size != data_size)
754 return nullptr;
755 }
756 #endif /* defined (PERF_ATTR_SIZE_VER5) */
757
758 bts->bts.size = size;
759 bts->bts.data_head = &header->data_head;
760 bts->bts.mem = (const uint8_t *) data.get () + data_offset;
761 bts->bts.last_head = 0ull;
762 bts->header = header;
763 bts->file = fd.release ();
764
765 data.release ();
766
767 tinfo->conf.bts.size = (unsigned int) size;
768 return tinfo.release ();
769 }
770
771 #if defined (PERF_ATTR_SIZE_VER5)
772
773 /* Enable branch tracing in Intel Processor Trace format. */
774
775 static struct btrace_target_info *
776 linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
777 {
778 struct btrace_tinfo_pt *pt;
779 size_t pages;
780 int pid, pg, errcode, type;
781
782 if (conf->size == 0)
783 return NULL;
784
785 errcode = perf_event_pt_event_type (&type);
786 if (errcode != 0)
787 return NULL;
788
789 pid = ptid_get_lwp (ptid);
790 if (pid == 0)
791 pid = ptid_get_pid (ptid);
792
793 gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
794 (XCNEW (btrace_target_info));
795 tinfo->ptid = ptid;
796
797 tinfo->conf.format = BTRACE_FORMAT_PT;
798 pt = &tinfo->variant.pt;
799
800 pt->attr.size = sizeof (pt->attr);
801 pt->attr.type = type;
802
803 pt->attr.exclude_kernel = 1;
804 pt->attr.exclude_hv = 1;
805 pt->attr.exclude_idle = 1;
806
807 errno = 0;
808 scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
809 if (fd.get () < 0)
810 return nullptr;
811
812 /* Allocate the configuration page. */
813 scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
814 fd.get (), 0);
815 if (data.get () == MAP_FAILED)
816 return nullptr;
817
818 struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
819 data.get ();
820
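  /* The Intel PT trace goes into a separate AUX area.  Its offset and size
     have to be published in the header page before the AUX mapping below
     can succeed.  */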
821 header->aux_offset = header->data_offset + header->data_size;
822
823 /* Convert the requested size in bytes to pages (rounding up). */
824 pages = ((size_t) conf->size / PAGE_SIZE
825 + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
826 /* We need at least one page. */
827 if (pages == 0)
828 pages = 1;
829
830 /* The buffer size can be requested in powers of two pages. Adjust PAGES
831 to the next power of two. */
832 for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
833 if ((pages & ((size_t) 1 << pg)) != 0)
834 pages += ((size_t) 1 << pg);
835
836 /* We try to allocate the requested size.
837 If that fails, try to get as much as we can. */
838 scoped_mmap aux;
839 for (; pages > 0; pages >>= 1)
840 {
841 size_t length;
842 __u64 data_size;
843
844 data_size = (__u64) pages * PAGE_SIZE;
845
846 /* Don't ask for more than we can represent in the configuration. */
847 if ((__u64) UINT_MAX < data_size)
848 continue;
849
850 length = (size_t) data_size;
851
852 /* Check for overflows. */
853 if ((__u64) length != data_size)
854 continue;
855
856 header->aux_size = data_size;
857
858 aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
859 header->aux_offset);
860 if (aux.get () != MAP_FAILED)
861 break;
862 }
863
864 if (pages == 0)
865 return nullptr;
866
867 pt->pt.size = aux.size ();
868 pt->pt.mem = (const uint8_t *) aux.release ();
869 pt->pt.data_head = &header->aux_head;
870 pt->header = header;
871 pt->file = fd.release ();
872
873 data.release ();
874
875 tinfo->conf.pt.size = (unsigned int) pt->pt.size;
876 return tinfo.release ();
877 }
878
879 #else /* !defined (PERF_ATTR_SIZE_VER5) */
880
881 static struct btrace_target_info *
882 linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
883 {
884 errno = EOPNOTSUPP;
885 return NULL;
886 }
887
888 #endif /* !defined (PERF_ATTR_SIZE_VER5) */
889
890 /* See linux-btrace.h. */
891
892 struct btrace_target_info *
893 linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
894 {
895 struct btrace_target_info *tinfo;
896
897 tinfo = NULL;
898 switch (conf->format)
899 {
900 case BTRACE_FORMAT_NONE:
901 break;
902
903 case BTRACE_FORMAT_BTS:
904 tinfo = linux_enable_bts (ptid, &conf->bts);
905 break;
906
907 case BTRACE_FORMAT_PT:
908 tinfo = linux_enable_pt (ptid, &conf->pt);
909 break;
910 }
911
912 if (tinfo == NULL)
913 error (_("Unknown error."));
914
915 return tinfo;
916 }
917
918 /* Disable BTS tracing. */
919
920 static enum btrace_error
921 linux_disable_bts (struct btrace_tinfo_bts *tinfo)
922 {
923 munmap((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
924 close (tinfo->file);
925
926 return BTRACE_ERR_NONE;
927 }
928
929 /* Disable Intel Processor Trace tracing. */
930
931 static enum btrace_error
932 linux_disable_pt (struct btrace_tinfo_pt *tinfo)
933 {
934 munmap((void *) tinfo->pt.mem, tinfo->pt.size);
935 munmap((void *) tinfo->header, PAGE_SIZE);
936 close (tinfo->file);
937
938 return BTRACE_ERR_NONE;
939 }
940
941 /* See linux-btrace.h. */
942
943 enum btrace_error
944 linux_disable_btrace (struct btrace_target_info *tinfo)
945 {
946 enum btrace_error errcode;
947
948 errcode = BTRACE_ERR_NOT_SUPPORTED;
949 switch (tinfo->conf.format)
950 {
951 case BTRACE_FORMAT_NONE:
952 break;
953
954 case BTRACE_FORMAT_BTS:
955 errcode = linux_disable_bts (&tinfo->variant.bts);
956 break;
957
958 case BTRACE_FORMAT_PT:
959 errcode = linux_disable_pt (&tinfo->variant.pt);
960 break;
961 }
962
963 if (errcode == BTRACE_ERR_NONE)
964 xfree (tinfo);
965
966 return errcode;
967 }
968
969 /* Read branch trace data in BTS format for the thread given by TINFO into
970 BTRACE using the TYPE reading method. */
971
972 static enum btrace_error
973 linux_read_bts (struct btrace_data_bts *btrace,
974 struct btrace_target_info *tinfo,
975 enum btrace_read_type type)
976 {
977 struct perf_event_buffer *pevent;
978 const uint8_t *begin, *end, *start;
979 size_t buffer_size, size;
980 __u64 data_head, data_tail;
981 unsigned int retries = 5;
982
983 pevent = &tinfo->variant.bts.bts;
984
985 /* For delta reads, we return at least the partial last block containing
986 the current PC. */
987 if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
988 return BTRACE_ERR_NONE;
989
990 buffer_size = pevent->size;
991 data_tail = pevent->last_head;
992
993 /* We may need to retry reading the trace. See below. */
994 while (retries--)
995 {
996 data_head = *pevent->data_head;
997
998 /* Delete any leftover trace from the previous iteration. */
999 VEC_free (btrace_block_s, btrace->blocks);
1000
1001 if (type == BTRACE_READ_DELTA)
1002 {
1003 __u64 data_size;
1004
1005 /* Determine the number of bytes to read and check for buffer
1006 overflows. */
1007
1008 /* Check for data head overflows. We might be able to recover from
1009 those but they are very unlikely and it's not really worth the
1010 effort, I think. */
1011 if (data_head < data_tail)
1012 return BTRACE_ERR_OVERFLOW;
1013
1014 /* If the buffer is smaller than the trace delta, we overflowed. */
1015 data_size = data_head - data_tail;
1016 if (buffer_size < data_size)
1017 return BTRACE_ERR_OVERFLOW;
1018
1019 /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t. */
1020 size = (size_t) data_size;
1021 }
1022 else
1023 {
1024 /* Read the entire buffer. */
1025 size = buffer_size;
1026
1027 /* Adjust the size if the buffer has not overflowed, yet. */
1028 if (data_head < size)
1029 size = (size_t) data_head;
1030 }
1031
1032 /* Data_head keeps growing; the buffer itself is circular. */
1033 begin = pevent->mem;
1034 start = begin + data_head % buffer_size;
1035
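  /* If the buffer has not wrapped, yet, valid data ends at START and a
     backwards read never needs to wrap; otherwise the entire buffer
     contains valid records and a wrapping read resumes at END.  */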
1036 if (data_head <= buffer_size)
1037 end = start;
1038 else
1039 end = begin + pevent->size;
1040
1041 btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);
1042
1043 /* The stopping thread notifies its ptracer before it is scheduled out.
1044 On multi-core systems, the debugger might therefore run while the
1045 kernel might be writing the last branch trace records.
1046
1047 Let's check whether the data head moved while we read the trace. */
1048 if (data_head == *pevent->data_head)
1049 break;
1050 }
1051
1052 pevent->last_head = data_head;
1053
1054 /* Prune the incomplete last block (i.e. the first one of inferior execution)
1055 if we're not doing a delta read. There is no way of filling in its zeroed
1056 BEGIN element. */
1057 if (!VEC_empty (btrace_block_s, btrace->blocks)
1058 && type != BTRACE_READ_DELTA)
1059 VEC_pop (btrace_block_s, btrace->blocks);
1060
1061 return BTRACE_ERR_NONE;
1062 }
1063
1064 /* Fill in the Intel Processor Trace configuration information. */
1065
1066 static void
1067 linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
1068 {
1069 conf->cpu = btrace_this_cpu ();
1070 }
1071
1072 /* Read branch trace data in Intel Processor Trace format for the thread
1073 given by TINFO into BTRACE using the TYPE reading method. */
1074
1075 static enum btrace_error
1076 linux_read_pt (struct btrace_data_pt *btrace,
1077 struct btrace_target_info *tinfo,
1078 enum btrace_read_type type)
1079 {
1080 struct perf_event_buffer *pt;
1081
1082 pt = &tinfo->variant.pt.pt;
1083
1084 linux_fill_btrace_pt_config (&btrace->config);
1085
1086 switch (type)
1087 {
1088 case BTRACE_READ_DELTA:
1089 /* We don't support delta reads. The data head (i.e. aux_head) wraps
1090 around to stay inside the aux buffer. */
1091 return BTRACE_ERR_NOT_SUPPORTED;
1092
1093 case BTRACE_READ_NEW:
1094 if (!perf_event_new_data (pt))
1095 return BTRACE_ERR_NONE;
1096
1097 /* Fall through. */
1098 case BTRACE_READ_ALL:
1099 perf_event_read_all (pt, &btrace->data, &btrace->size);
1100 return BTRACE_ERR_NONE;
1101 }
1102
1103 internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
1104 }
1105
1106 /* See linux-btrace.h. */
1107
1108 enum btrace_error
1109 linux_read_btrace (struct btrace_data *btrace,
1110 struct btrace_target_info *tinfo,
1111 enum btrace_read_type type)
1112 {
1113 switch (tinfo->conf.format)
1114 {
1115 case BTRACE_FORMAT_NONE:
1116 return BTRACE_ERR_NOT_SUPPORTED;
1117
1118 case BTRACE_FORMAT_BTS:
1119 /* We read btrace in BTS format. */
1120 btrace->format = BTRACE_FORMAT_BTS;
1121 btrace->variant.bts.blocks = NULL;
1122
1123 return linux_read_bts (&btrace->variant.bts, tinfo, type);
1124
1125 case BTRACE_FORMAT_PT:
1126 /* We read btrace in Intel Processor Trace format. */
1127 btrace->format = BTRACE_FORMAT_PT;
1128 btrace->variant.pt.data = NULL;
1129 btrace->variant.pt.size = 0;
1130
1131 return linux_read_pt (&btrace->variant.pt, tinfo, type);
1132 }
1133
1134 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1135 }
1136
1137 /* See linux-btrace.h. */
1138
1139 const struct btrace_config *
1140 linux_btrace_conf (const struct btrace_target_info *tinfo)
1141 {
1142 return &tinfo->conf;
1143 }
1144
1145 #else /* !HAVE_LINUX_PERF_EVENT_H */
1146
1147 /* See linux-btrace.h. */
1148
1149 int
1150 linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
1151 {
1152 return 0;
1153 }
1154
1155 /* See linux-btrace.h. */
1156
1157 struct btrace_target_info *
1158 linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
1159 {
1160 return NULL;
1161 }
1162
1163 /* See linux-btrace.h. */
1164
1165 enum btrace_error
1166 linux_disable_btrace (struct btrace_target_info *tinfo)
1167 {
1168 return BTRACE_ERR_NOT_SUPPORTED;
1169 }
1170
1171 /* See linux-btrace.h. */
1172
1173 enum btrace_error
1174 linux_read_btrace (struct btrace_data *btrace,
1175 struct btrace_target_info *tinfo,
1176 enum btrace_read_type type)
1177 {
1178 return BTRACE_ERR_NOT_SUPPORTED;
1179 }
1180
1181 /* See linux-btrace.h. */
1182
1183 const struct btrace_config *
1184 linux_btrace_conf (const struct btrace_target_info *tinfo)
1185 {
1186 return NULL;
1187 }
1188
1189 #endif /* !HAVE_LINUX_PERF_EVENT_H */