/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2016 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "common-defs.h"
#include "linux-btrace.h"
#include "common-regcache.h"
#include "gdb_wait.h"
#include "x86-cpuid.h"
#include "filestuff.h"

#include <inttypes.h>

#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
#endif

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
    }

  return cpu;
}
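
/* A worked example of the decoding above (illustrative, not from this
   file): a CPUID leaf 1 EAX value of 0x000306a9, as reported by an Ivy
   Bridge part, decodes as

     family = (0x000306a9 >> 8) & 0xf     = 0x6
     model  = (0x000306a9 >> 4) & 0xf     = 0xa
     model += (0x000306a9 >> 12) & 0xf0   = 0xa + 0x30 = 0x3a

   matching the 0x3a Ivy Bridge model checked in intel_supports_bts
   below.  */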

/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  buffer_size = pev->size;
  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}
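
/* An illustrative sketch of the split copy above on a toy 8-byte ring
   buffer; the example_* name and the values are assumptions for
   demonstration, and the block is compiled out on purpose.  */

#if 0 /* Example only.  */
static void
example_wrapped_copy (void)
{
  const gdb_byte ring[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
  const gdb_byte *begin = ring, *end = ring + 8;

  /* Pretend DATA_HEAD = 10 and SIZE = 4, so DATA_TAIL = 6: START sits at
     offset 6 and STOP at offset 2, i.e. the data wrapped around.  */
  const gdb_byte *start = begin + 6, *stop = begin + 2;
  gdb_byte buffer[4];

  /* START >= STOP, so copy the tail first, then the head, yielding
     { 16, 17, 10, 11 }.  */
  memcpy (buffer, start, end - start);
  memcpy (buffer + (end - start), begin, stop - begin);
}
#endif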

/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;

  size = pev->size;
  if (data_head < size)
    size = (size_t) data_head;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Determine the event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
  FILE *file;
  int found;

  file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);

  fclose (file);

  if (found == 1)
    return 0;
  return -1;
}
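
/* For reference, an illustrative session (the values are assumptions
   about a typical system, not taken from this file): the sysfs attribute
   read above holds the dynamically assigned PMU type as a single decimal
   integer, e.g.

     $ cat /sys/bus/event_source/devices/intel_pt/type
     8

   That number is what later gets stored into perf_event_attr.type when
   opening an Intel Processor Trace event.  */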

/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;
  FILE *file;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file);
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  fclose (file);

  return kernel_start;
}
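
/* Example input (illustrative): on a typical system a matching
   /proc/kallsyms line looks like

     ffffffff81000000 T _text

   from which the sscanf above extracts ADDR = 0xffffffff81000000 and
   SYMBOL = "_text".  Unprivileged readers may see all-zero addresses
   (kptr_restrict), in which case KERNEL_START stays zero and callers
   fall back to the most-significant-bit heuristic below.  */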

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */

static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */

          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}
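
/* A worked example of the block construction above (illustrative):
   assume the current PC is 0x500 and the two most recent samples, read
   backwards from START, are

     s2 = { from = 0x400, to = 0x480 }
     s1 = { from = 0x300, to = 0x380 }

   The traversal first pushes { begin = 0x480, end = 0x500 }, then
   { begin = 0x380, end = 0x400 }, and finally the open-ended
   { begin = 0, end = 0x300 } that linux_read_bts later fills in or
   prunes.  */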

/* Check whether the kernel supports BTS.  */

static int
kernel_supports_bts (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test bts: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test bts: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test bts: expected stop. status: %d."),
                   status);
          return 0;
        }

      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
        close (file);

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test bts: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether the kernel supports Intel Processor Trace.  */

static int
kernel_supports_pt (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file, type;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test pt: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test pt: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test pt: expected stop. status: %d."),
                   status);
          return 0;
        }

      status = perf_event_pt_event_type (&type);
      if (status != 0)
        file = -1;
      else
        {
          memset (&attr, 0, sizeof (attr));

          attr.size = sizeof (attr);
          attr.type = type;
          attr.exclude_kernel = 1;
          attr.exclude_hv = 1;
          attr.exclude_idle = 1;

          file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
          if (file >= 0)
            close (file);
        }

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test pt: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}

/* Check whether the linux target supports BTS.  */

static int
linux_supports_bts (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_bts ())
        cached = -1;
      else if (!cpu_supports_bts ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* Check whether the linux target supports Intel Processor Trace.  */

static int
linux_supports_pt (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_pt ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  switch (format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return linux_supports_bts ();

    case BTRACE_FORMAT_PT:
      return linux_supports_pt ();
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
}

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
  if (bts->file < 0)
    goto err_out;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      /* The number of pages we request needs to be a power of two.  */
      header = ((struct perf_event_mmap_page *)
                mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0));
      if (header != MAP_FAILED)
        break;
    }

  if (pages == 0)
    goto err_file;

  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        {
          munmap ((void *) header, size + PAGE_SIZE);
          goto err_file;
        }
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->header = header;
  bts->bts.mem = ((const uint8_t *) header) + data_offset;
  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.last_head = 0ull;

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo;

 err_file:
  /* We were not able to allocate any buffer.  */
  close (bts->file);

 err_out:
  xfree (tinfo);
  return NULL;
}
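
/* A worked example of the sizing logic above (illustrative): with 4 KiB
   pages, a requested CONF->SIZE of 24 KiB yields PAGES = 6, which the
   power-of-two loop rounds up to 8.  The mmap then covers 8 data pages
   plus the configuration page (36 KiB); if it fails, the retry loop
   asks for 4, 2, and finally 1 data page before giving up.  */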

#if defined (PERF_ATTR_SIZE_VER5)

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_pt *pt;
  size_t pages, size;
  int pid, pg, errcode, type;

  if (conf->size == 0)
    return NULL;

  errcode = perf_event_pt_event_type (&type);
  if (errcode != 0)
    return NULL;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = type;

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  pt->file = syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0);
  if (pt->file < 0)
    goto err;

  /* Allocate the configuration page.  */
  header = ((struct perf_event_mmap_page *)
            mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                  pt->file, 0));
  if (header == MAP_FAILED)
    goto err_file;

  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        continue;

      header->aux_size = data_size;
      length = size;

      pt->pt.mem = ((const uint8_t *)
                    mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
                          header->aux_offset));
      if (pt->pt.mem != MAP_FAILED)
        break;
    }

  if (pages == 0)
    goto err_conf;

  pt->header = header;
  pt->pt.size = size;
  pt->pt.data_head = &header->aux_head;

  tinfo->conf.pt.size = (unsigned int) size;
  return tinfo;

 err_conf:
  munmap ((void *) header, PAGE_SIZE);

 err_file:
  close (pt->file);

 err:
  xfree (tinfo);
  return NULL;
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  errno = EOPNOTSUPP;
  return NULL;
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  tinfo = NULL;
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      tinfo = linux_enable_bts (ptid, &conf->bts);
      break;

    case BTRACE_FORMAT_PT:
      tinfo = linux_enable_pt (ptid, &conf->pt);
      break;
    }

  return tinfo;
}

/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace tracing.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}
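
/* A worked example of the read logic above (illustrative): with a
   4096-byte buffer and DATA_HEAD = 5000, a non-delta read clamps SIZE
   to the 4096 bytes the buffer can hold, START points at offset
   5000 % 4096 = 904, and END is one past the buffer since the head has
   wrapped.  A delta read with DATA_TAIL = 4500 copies only the 500 new
   bytes.  */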

/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}
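
/* A minimal usage sketch of this module's entry points, assuming an
   already-attached inferior identified by PTID; the 64 KiB request is an
   arbitrary example value and the block is compiled out on purpose.  */

#if 0 /* Example only.  */
static void
example_btrace_session (ptid_t ptid)
{
  struct btrace_config conf;
  struct btrace_target_info *tinfo;
  struct btrace_data data;

  memset (&conf, 0, sizeof (conf));
  conf.format = BTRACE_FORMAT_BTS;
  conf.bts.size = 64 * 1024;

  tinfo = linux_enable_btrace (ptid, &conf);
  if (tinfo == NULL)
    return;

  /* linux_read_btrace fills in DATA's format and payload.  */
  if (linux_read_btrace (&data, tinfo, BTRACE_READ_ALL) == BTRACE_ERR_NONE)
    VEC_free (btrace_block_s, data.variant.bts.blocks);

  linux_disable_btrace (tinfo);
}
#endif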

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  return 0;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */