/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>. */

#include "common-defs.h"
#include "linux-btrace.h"
#include "common-regcache.h"
#include "gdb_wait.h"
#include "x86-cpuid.h"
#include "filestuff.h"

#include <inttypes.h>

#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
#endif

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event. */
struct perf_event_bts
{
  /* The linear address of the branch source. */
  uint64_t from;

  /* The linear address of the branch destination. */
  uint64_t to;
};

/* A perf_event branch trace sample. */
struct perf_event_sample
{
  /* The perf_event sample header. */
  struct perf_event_header header;

  /* The perf_event branch tracing payload. */
  struct perf_event_bts bts;
};
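
/* With sample_type set to PERF_SAMPLE_IP | PERF_SAMPLE_ADDR (see
   linux_enable_bts below), the kernel stores exactly these two 64-bit
   fields after the sample header: the IP field carries the branch source
   and the ADDR field the branch destination, which is what the FROM and TO
   members above map onto. */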

/* Identify the cpu we're running on. */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

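              /* CPUID leaf 1 reports the family in bits 8-11 of EAX and the
                 model in bits 4-7; for family 6, bits 16-19 hold the
                 extended model, which is folded in below as the high nibble
                 of the model number. */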
              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
    }

  return cpu;
}

/* Return non-zero if there is new data in PEVENT; zero otherwise. */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory. */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold. */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer. This is already handled by the %
     BUFFER_SIZE operation, below. Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries. */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}

/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE. */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Determine the event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise. */
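/* The intel_pt PMU is registered dynamically rather than being one of the
   fixed PERF_TYPE_* event sources, so the kernel exports the number to use
   for perf_event_attr.type via sysfs; this function reads it from there. */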

static int
perf_event_pt_event_type (int *type)
{
  FILE *file;
  int found;

  file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);

  fclose (file);

  if (found == 1)
    return 0;
  return -1;
}

/* Try to determine the start address of the Linux kernel. */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

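  /* Each line of /proc/kallsyms has the form "<address> <type> <symbol>",
     e.g. "ffffffff81000000 T _text"; the "_text" symbol marks the start of
     the kernel text section. */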
  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}

/* Check whether an address is in the kernel. */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit. This will work at least for 64-bit kernels. */
  return ((addr & (1ull << 63)) != 0);
}

/* Check whether a perf event record should be skipped. */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space. Branches
     from user into kernel space will be suppressed. We filter the former to
     provide a consistent branch trace excluding kernel. */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record. This is
   meant to catch cases when we get out of sync with the perf event stream. */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer. */
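
/* For example, if sample s1 records a branch from 0x10 to 0x20 and the next
   sample s2 records a branch from 0x30 to 0x40, the instructions executed
   between the two branches form the block [0x20; 0x30): it begins at s1.to
   and ends at s2.from. */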

static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc. */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size). */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal. */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done. */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer; we will wrap around and
             reappear at the very right of the ring buffer. */

          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done. */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around. The lower part is at the end and
                 the upper part is at the beginning of the buffer. */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample. */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block. */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block. */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it begins, but we know where it ends. If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it. */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}

/* Check whether the kernel supports BTS. */

static int
kernel_supports_bts (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

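  /* Probe by forking a child, letting it stop itself with SIGTRAP, and then
     trying to open a branch-sampling perf event on it, much like
     linux_enable_bts later does for a real inferior. */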
  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test bts: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test bts: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test bts: expected stop. status: %d."),
                   status);
          return 0;
        }

      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
        close (file);

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test bts: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether the kernel supports Intel Processor Trace. */

static int
kernel_supports_pt (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file, type;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test pt: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test pt: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test pt: expected stop. status: %d."),
                   status);
          return 0;
        }

      status = perf_event_pt_event_type (&type);
      if (status != 0)
        file = -1;
      else
        {
          memset (&attr, 0, sizeof (attr));

          attr.size = sizeof (attr);
          attr.type = type;
          attr.exclude_kernel = 1;
          attr.exclude_hv = 1;
          attr.exclude_idle = 1;

          file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
          if (file >= 0)
            close (file);
        }

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test pt: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether an Intel cpu supports BTS. */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling. */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS. */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others. Let's assume they do. */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}

/* Check whether the linux target supports BTS. */

static int
linux_supports_bts (void)
{
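  /* Cache the probe result: zero means we have not checked yet, -1 means
     BTS is not supported, 1 means it is. */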
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_bts ())
        cached = -1;
      else if (!cpu_supports_bts ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* Check whether the linux target supports Intel Processor Trace. */

static int
linux_supports_pt (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_pt ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* See linux-btrace.h. */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  switch (format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return linux_supports_bts ();

    case BTRACE_FORMAT_PT:
      return linux_supports_pt ();
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
}

/* Enable branch tracing in BTS format. */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address. */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
  if (bts->file < 0)
    goto err_out;

  /* Convert the requested size in bytes to pages (rounding up). */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page. */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages. Adjust PAGES
     to the next power of two. */
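  /* For example, a request of five pages first grows to six (5 + 1) and
     then to eight (6 + 2), at which point PAGES equals 1 << 3 and the loop
     below stops. */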
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can. */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration. */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows. */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      /* The number of pages we request needs to be a power of two. */
      header = ((struct perf_event_mmap_page *)
                mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0));
      if (header != MAP_FAILED)
        break;
    }

  if (pages == 0)
    goto err_file;

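  /* If the kernel does not tell us where the data area is, assume it starts
     right behind the single metadata page; kernels with newer headers
     report the actual location in DATA_OFFSET, which we read below. */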
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows. */
      if ((__u64) size != data_size)
        {
          munmap ((void *) header, size + PAGE_SIZE);
          goto err_file;
        }
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->header = header;
  bts->bts.mem = ((const uint8_t *) header) + data_offset;
  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.last_head = 0ull;

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo;

 err_file:
  /* We were not able to allocate any buffer. */
  close (bts->file);

 err_out:
  xfree (tinfo);
  return NULL;
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Enable branch tracing in Intel Processor Trace format. */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_pt *pt;
  size_t pages, size;
  int pid, pg, errcode, type;

  if (conf->size == 0)
    return NULL;

  errcode = perf_event_pt_event_type (&type);
  if (errcode != 0)
    return NULL;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = type;

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  pt->file = syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0);
  if (pt->file < 0)
    goto err;

  /* Allocate the configuration page. */
  header = ((struct perf_event_mmap_page *)
            mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                  pt->file, 0));
  if (header == MAP_FAILED)
    goto err_file;

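  /* Intel PT trace data is collected in a separate "aux" area of the perf
     buffer. User space picks its position and size within the file mapping
     by filling in aux_offset and aux_size; we place it directly behind the
     regular data area and mmap it separately below. */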
  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up). */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page. */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages. Adjust PAGES
     to the next power of two. */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can. */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration. */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;

      /* Check for overflows. */
      if ((__u64) size != data_size)
        continue;

      header->aux_size = data_size;
      length = size;

      pt->pt.mem = ((const uint8_t *)
                    mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
                          header->aux_offset));
      if (pt->pt.mem != MAP_FAILED)
        break;
    }

  if (pages == 0)
    goto err_conf;

  pt->header = header;
  pt->pt.size = size;
  pt->pt.data_head = &header->aux_head;

  tinfo->conf.pt.size = (unsigned int) size;
  return tinfo;

 err_conf:
  munmap ((void *) header, PAGE_SIZE);

 err_file:
  close (pt->file);

 err:
  xfree (tinfo);
  return NULL;
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  errno = EOPNOTSUPP;
  return NULL;
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h. */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  tinfo = NULL;
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      tinfo = linux_enable_bts (ptid, &conf->bts);
      break;

    case BTRACE_FORMAT_PT:
      tinfo = linux_enable_pt (ptid, &conf->pt);
      break;
    }

  return tinfo;
}

/* Disable BTS tracing. */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace tracing. */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h. */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method. */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC. */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace. See below. */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration. */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows. */

          /* Check for data head overflows. We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think. */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed. */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t. */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer. */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed yet. */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular. */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel is still writing the last branch trace records.

         Let's check whether the data head moved while we read the trace. */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read. There is no way of filling in its zeroed
     BEGIN element. */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel Processor Trace configuration information. */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method. */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads. The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer. */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through. */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}

/* See linux-btrace.h. */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format. */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format. */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* See linux-btrace.h. */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h. */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  return 0;
}

/* See linux-btrace.h. */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h. */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h. */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h. */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */