/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2018 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "common-defs.h"
#include "linux-btrace.h"
#include "common-regcache.h"
#include "gdb_wait.h"
#include "x86-cpuid.h"
#include "filestuff.h"
#include "common/scoped_fd.h"
#include "common/scoped_mmap.h"

#include <inttypes.h>

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
    }

  return cpu;
}
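
/* For example, an Ivy Bridge cpu reports CPUID leaf 1 EAX = 0x000306a9,
   giving family (0x306a9 >> 8) & 0xf == 0x6 and model
   ((0x306a9 >> 4) & 0xf) + ((0x306a9 >> 12) & 0xf0) == 0xa + 0x30 == 0x3a,
   the Ivy Bridge model number checked in intel_supports_bts below.  */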

/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}

/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in PSIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}
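
/* A typical x86-64 /proc/kallsyms entry for the kernel text start looks like
   "ffffffff81000000 T _text" (the address varies with KASLR); the sscanf
   pattern above extracts the address and the symbol name from such lines.  */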

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */
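
/* For example (addresses made up for illustration), adjacent samples
   s1 = { .from = 0x100, .to = 0x200 } and s2 = { .from = 0x210, .to = 0x300 }
   form the block [0x200; 0x210): the code executed sequentially between the
   branch that landed at 0x200 and the next branch taken at 0x210.  */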

static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */

          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  if (!cpu_supports_bts ())
    error (_("BTS support has been disabled for the target cpu."));

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
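  /* With a sample period of one, every retired branch instruction produces a
     sample; PERF_SAMPLE_IP then carries the branch source and
     PERF_SAMPLE_ADDR the destination, matching struct perf_event_bts.  */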

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    error (_("Failed to start recording: %s"), safe_strerror (errno));

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
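  /* For example, a request of five pages (0b101) is bumped to six, then
     eight, and the loop stops once PAGES equals 1 << 3.  */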

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;
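      /* The extra page in front of the data buffer will hold the
         struct perf_event_mmap_page header.  */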

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      errno = 0;
      /* The number of pages we request needs to be a power of two.  */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        error (_("Failed to determine trace buffer size."));
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.mem = (const uint8_t *) data.get () + data_offset;
  bts->bts.last_head = 0ull;
  bts->header = header;
  bts->file = fd.release ();

  data.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Determine the event type.  */

static int
perf_event_pt_event_type ()
{
  static const char filename[] = "/sys/bus/event_source/devices/intel_pt/type";

  errno = 0;
  gdb_file_up file = gdb_fopen_cloexec (filename, "r");
  if (file.get () == nullptr)
    error (_("Failed to open %s: %s."), filename, safe_strerror (errno));

  int type, found = fscanf (file.get (), "%d", &type);
  if (found != 1)
    error (_("Failed to read the PT event type from %s."), filename);

  return type;
}
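
/* The sysfs file holds the dynamically assigned PMU type as a decimal
   integer (a small number, often 8; the exact value varies between kernels
   and machines).  */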

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct btrace_tinfo_pt *pt;
  size_t pages;
  int pid, pg;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = perf_event_pt_event_type ();

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    error (_("Failed to start recording: %s"), safe_strerror (errno));

  /* Allocate the configuration page.  */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    error (_("Failed to map trace user page: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

  header->aux_offset = header->data_offset + header->data_size;
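  /* The aux buffer lives behind the user page and the regular data buffer;
     writing aux_offset (and aux_size, below) into the user page asks the
     kernel to place it there, before we map it at that offset.  */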

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      length = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) length != data_size)
        continue;

      header->aux_size = data_size;

      errno = 0;
      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
                 header->aux_offset);
      if (aux.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  pt->pt.size = aux.size ();
  pt->pt.mem = (const uint8_t *) aux.release ();
  pt->pt.data_head = &header->aux_head;
  pt->header = header;
  pt->file = fd.release ();

  data.release ();

  tinfo->conf.pt.size = (unsigned int) pt->pt.size;
  return tinfo.release ();
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  error (_("Intel Processor Trace support was disabled at compile time."));
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      error (_("Bad branch trace format."));

    default:
      error (_("Unknown branch trace format."));

    case BTRACE_FORMAT_BTS:
      return linux_enable_bts (ptid, &conf->bts);

    case BTRACE_FORMAT_PT:
      return linux_enable_pt (ptid, &conf->pt);
    }
}

/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace tracing.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;
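
      /* If the buffer has not wrapped around yet, the valid data ends at the
         current write position; afterwards, the entire mapping holds valid
         samples.  */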
      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel is still writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */