GDB: Fix the overflow in addr/line_is_displayed()
[deliverable/binutils-gdb.git] / gdb / btrace.h
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2020 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #ifndef BTRACE_H
23 #define BTRACE_H
24
25 /* Branch tracing (btrace) is a per-thread control-flow execution trace of the
26 inferior. For presentation purposes, the branch trace is represented as a
27 list of sequential control-flow blocks, one such list per thread. */
28
29 #include "gdbsupport/btrace-common.h"
30 #include "target/waitstatus.h" /* For enum target_stop_reason. */
31 #include "gdbsupport/enum-flags.h"
32
33 #if defined (HAVE_LIBIPT)
34 # include <intel-pt.h>
35 #endif
36
37 #include <vector>
38
39 struct thread_info;
40 struct btrace_function;
41
/* A coarse instruction classification.

   Each traced instruction is tagged with one of these classes; the class is
   what the iteration code uses to follow calls, returns, and jumps without
   re-disassembling the instruction.  */
enum btrace_insn_class
{
  /* The instruction is something not listed below.  */
  BTRACE_INSN_OTHER,

  /* The instruction is a function call.  */
  BTRACE_INSN_CALL,

  /* The instruction is a function return.  */
  BTRACE_INSN_RETURN,

  /* The instruction is an unconditional jump.  */
  BTRACE_INSN_JUMP
};
57
/* Instruction flags.  Combined into BTRACE_INSN_FLAGS via the enum-flags
   helper below.  */
enum btrace_insn_flag
{
  /* The instruction has been executed speculatively, i.e. it was on a
     mispredicted path and may not have architecturally retired.  */
  BTRACE_INSN_FLAG_SPECULATIVE = (1 << 0)
};
DEF_ENUM_FLAGS_TYPE (enum btrace_insn_flag, btrace_insn_flags);
65
/* A branch trace instruction.

   This represents a single instruction in a branch trace.  */
struct btrace_insn
{
  /* The address of this instruction.  */
  CORE_ADDR pc;

  /* The size of this instruction in bytes.  */
  gdb_byte size;

  /* The instruction class of this instruction.  */
  enum btrace_insn_class iclass;

  /* A bit vector of BTRACE_INSN_FLAGS.  */
  btrace_insn_flags flags;
};
83
/* Flags for btrace function segments.  Combined into BTRACE_FUNCTION_FLAGS
   via the enum-flags helper below.  */
enum btrace_function_flag
{
  /* The 'up' link interpretation.
     If set, it points to the function segment we returned to.
     If clear, it points to the function segment we called from.  */
  BFUN_UP_LINKS_TO_RET = (1 << 0),

  /* The 'up' link points to a tail call.  This obviously only makes sense
     if BFUN_UP_LINKS_TO_RET is clear.  */
  BFUN_UP_LINKS_TO_TAILCALL = (1 << 1)
};
DEF_ENUM_FLAGS_TYPE (enum btrace_function_flag, btrace_function_flags);
97
/* Decode errors for the BTS recording format.  Stored in the ERRCODE field
   of a gap function segment (see struct btrace_function).  */
enum btrace_bts_error
{
  /* The instruction trace overflowed the end of the trace block.  */
  BDE_BTS_OVERFLOW = 1,

  /* The instruction size could not be determined.  */
  BDE_BTS_INSN_SIZE
};
107
/* Decode errors for the Intel Processor Trace recording format.  Stored in
   the ERRCODE field of a gap function segment (see struct btrace_function).  */
enum btrace_pt_error
{
  /* The user cancelled trace processing.  */
  BDE_PT_USER_QUIT = 1,

  /* Tracing was temporarily disabled.  */
  BDE_PT_DISABLED,

  /* Trace recording overflowed.  */
  BDE_PT_OVERFLOW

  /* Negative numbers are used by the decoder library.  */
};
122
/* A branch trace function segment.

   This represents a function segment in a branch trace, i.e. a consecutive
   number of instructions belonging to the same function.

   In case of decode errors, we add an empty function segment to indicate
   the gap in the trace.

   We do not allow function segments without instructions otherwise.  */
struct btrace_function
{
  /* Construct a function segment.

     MSYM_ and SYM_ are the minimal and full symbol for the function; either
     may be NULL.  NUMBER_ is the 1-based segment number in control-flow
     order, INSN_OFFSET_ the instruction number of this segment's first
     instruction, and LEVEL_ the (unnormalized) back trace level.  */
  btrace_function (struct minimal_symbol *msym_, struct symbol *sym_,
		   unsigned int number_, unsigned int insn_offset_, int level_)
    : msym (msym_), sym (sym_), insn_offset (insn_offset_), number (number_),
      level (level_)
  {
  }

  /* The full and minimal symbol for the function.  Both may be NULL.  */
  struct minimal_symbol *msym;
  struct symbol *sym;

  /* The function segment numbers of the previous and next segment belonging to
     the same function.  If a function calls another function, the former will
     have at least two segments: one before the call and another after the
     return.  Will be zero if there is no such function segment.  */
  unsigned int prev = 0;
  unsigned int next = 0;

  /* The function segment number of the directly preceding function segment in
     a (fake) call stack.  Will be zero if there is no such function segment in
     the record.  */
  unsigned int up = 0;

  /* The instructions in this function segment.
     The instruction vector will be empty if the function segment
     represents a decode error.  */
  std::vector<btrace_insn> insn;

  /* The error code of a decode error that led to a gap.
     Must be zero unless INSN is empty; non-zero otherwise.  */
  int errcode = 0;

  /* The instruction number offset for the first instruction in this
     function segment.
     If INSN is empty this is the insn_offset of the succeeding function
     segment in control-flow order.  */
  unsigned int insn_offset;

  /* The 1-based function number in control-flow order.
     If INSN is empty indicating a gap in the trace due to a decode error,
     we still count the gap as a function.  */
  unsigned int number;

  /* The function level in a back trace across the entire branch trace.
     A caller's level is one lower than the level of its callee.

     Levels can be negative if we see returns for which we have not seen
     the corresponding calls.  The branch trace thread information provides
     a fixup to normalize function levels so the smallest level is zero.  */
  int level;

  /* A bit-vector of btrace_function_flag.  */
  btrace_function_flags flags = 0;
};
188
/* A branch trace instruction iterator.

   Identifies one instruction by the function segment that contains it and
   the instruction's position within that segment.  Manipulated via the
   btrace_insn_* functions declared below.  */
struct btrace_insn_iterator
{
  /* The branch trace information for this thread.  Will never be NULL.  */
  const struct btrace_thread_info *btinfo;

  /* The index of the function segment in BTINFO->FUNCTIONS.  */
  unsigned int call_index;

  /* The index into the function segment's instruction vector.  */
  unsigned int insn_index;
};
201
/* A branch trace function call iterator.

   Identifies one function segment in a thread's branch trace.  Manipulated
   via the btrace_call_* functions declared below.  */
struct btrace_call_iterator
{
  /* The branch trace information for this thread.  Will never be NULL.  */
  const struct btrace_thread_info *btinfo;

  /* The index of the function segment in BTINFO->FUNCTIONS.  */
  unsigned int index;
};
211
/* Branch trace iteration state for "record instruction-history".

   Remembers where the previous command left off so a repeated command can
   continue from there.  */
struct btrace_insn_history
{
  /* The branch trace instruction range from BEGIN (inclusive) to
     END (exclusive) that has been covered last time.  */
  struct btrace_insn_iterator begin;
  struct btrace_insn_iterator end;
};
220
/* Branch trace iteration state for "record function-call-history".

   Remembers where the previous command left off so a repeated command can
   continue from there.  */
struct btrace_call_history
{
  /* The branch trace function range from BEGIN (inclusive) to END (exclusive)
     that has been covered last time.  */
  struct btrace_call_iterator begin;
  struct btrace_call_iterator end;
};
229
/* Branch trace thread flags.  Describe the pending resume/stop request for
   a thread; combined into BTRACE_THREAD_FLAGS via the enum-flags helper
   below.  */
enum btrace_thread_flag : unsigned
{
  /* The thread is to be stepped forwards.  */
  BTHR_STEP = (1 << 0),

  /* The thread is to be stepped backwards.  */
  BTHR_RSTEP = (1 << 1),

  /* The thread is to be continued forwards.  */
  BTHR_CONT = (1 << 2),

  /* The thread is to be continued backwards.  */
  BTHR_RCONT = (1 << 3),

  /* The thread is to be moved.  Convenience mask covering all four movement
     requests above.  */
  BTHR_MOVE = (BTHR_STEP | BTHR_RSTEP | BTHR_CONT | BTHR_RCONT),

  /* The thread is to be stopped.  */
  BTHR_STOP = (1 << 4)
};
DEF_ENUM_FLAGS_TYPE (enum btrace_thread_flag, btrace_thread_flags);
252
#if defined (HAVE_LIBIPT)
/* A packet.

   One decoded Intel Processor Trace packet, as produced by the libipt
   packet decoder; used by "maintenance btrace" commands.  */
struct btrace_pt_packet
{
  /* The offset in the trace stream.  */
  uint64_t offset;

  /* The decode error code.  */
  enum pt_error_code errcode;

  /* The decoded packet.  Only valid if ERRCODE == pte_ok.  */
  struct pt_packet packet;
};

#endif /* defined (HAVE_LIBIPT) */
268
/* Branch trace iteration state for "maintenance btrace packet-history".

   Remembers where the previous command left off so a repeated command can
   continue from there.  */
struct btrace_maint_packet_history
{
  /* The branch trace packet range from BEGIN (inclusive) to
     END (exclusive) that has been covered last time.  */
  unsigned int begin;
  unsigned int end;
};
277
/* Branch trace maintenance information per thread.

   This information is used by "maintenance btrace" commands.  */
struct btrace_maint_info
{
  /* Most information is format-specific.
     The format can be found in the BTRACE.DATA.FORMAT field of each thread.
     Only the union member matching that format is valid.  */
  union
  {
    /* BTRACE.DATA.FORMAT == BTRACE_FORMAT_BTS */
    struct
    {
      /* The packet history iterator.
	 We are iterating over BTRACE.DATA.FORMAT.VARIANT.BTS.BLOCKS.  */
      struct btrace_maint_packet_history packet_history;
    } bts;

#if defined (HAVE_LIBIPT)
    /* BTRACE.DATA.FORMAT == BTRACE_FORMAT_PT */
    struct
    {
      /* A vector of decoded packets.  */
      std::vector<btrace_pt_packet> *packets;

      /* The packet history iterator.
	 We are iterating over the above PACKETS vector.  */
      struct btrace_maint_packet_history packet_history;
    } pt;
#endif /* defined (HAVE_LIBIPT) */
  } variant;
};
309
/* Branch trace information per thread.

   This represents the branch trace configuration as well as the entry point
   into the branch trace data.  For the latter, it also contains the index into
   an array of branch trace blocks used for iterating through the branch trace
   blocks of a thread.  */
struct btrace_thread_info
{
  /* The target branch trace information for this thread.

     This contains the branch trace configuration as well as any
     target-specific information necessary for implementing branch tracing on
     the underlying architecture.  */
  struct btrace_target_info *target;

  /* The raw branch trace data for the below branch trace.  */
  struct btrace_data data;

  /* Vector of decoded function segments in execution flow order.
     Note that the numbering for btrace function segments starts with 1, so
     function segment i will be at index (i - 1).  */
  std::vector<btrace_function> functions;

  /* The function level offset.  When added to each function's LEVEL,
     this normalizes the function levels such that the smallest level
     becomes zero.  */
  int level;

  /* The number of gaps in the trace.  */
  unsigned int ngaps;

  /* A bit-vector of btrace_thread_flag.  */
  btrace_thread_flags flags;

  /* The instruction history iterator.  */
  struct btrace_insn_history *insn_history;

  /* The function call history iterator.  */
  struct btrace_call_history *call_history;

  /* The current replay position.  NULL if not replaying.
     Gaps are skipped during replay, so REPLAY always points to a valid
     instruction.  */
  struct btrace_insn_iterator *replay;

  /* Why the thread stopped, if we need to track it.  */
  enum target_stop_reason stop_reason;

  /* Maintenance information.  */
  struct btrace_maint_info maint;
};
361
/* Enable branch tracing for a thread TP using the configuration CONF.  */
extern void btrace_enable (struct thread_info *tp,
			   const struct btrace_config *conf);

/* Get the branch trace configuration for a thread.
   Return NULL if branch tracing is not enabled for that thread.  */
extern const struct btrace_config *
  btrace_conf (const struct btrace_thread_info *);

/* Disable branch tracing for a thread.
   This will also delete the current branch trace data.  */
extern void btrace_disable (struct thread_info *);

/* Disable branch tracing for a thread during teardown.
   This is similar to btrace_disable, except that it will use
   target_teardown_btrace instead of target_disable_btrace.  */
extern void btrace_teardown (struct thread_info *);

/* Return a human readable error string for the given ERRCODE in FORMAT.
   ERRCODE is one of the btrace_bts_error/btrace_pt_error values above.
   The pointer will never be NULL and must not be freed.  */

extern const char *btrace_decode_error (enum btrace_format format, int errcode);

/* Fetch the branch trace for a single thread.  If CPU is not NULL, assume
   CPU for trace decode.  */
extern void btrace_fetch (struct thread_info *,
			  const struct btrace_cpu *cpu);

/* Clear the branch trace for a single thread.  */
extern void btrace_clear (struct thread_info *);

/* Clear the branch trace for all threads when an object file goes away.  */
extern void btrace_free_objfile (struct objfile *);

/* Parse a branch trace xml document XML into DATA.  */
extern void parse_xml_btrace (struct btrace_data *data, const char *xml);

/* Parse a branch trace configuration xml document XML into CONF.  */
extern void parse_xml_btrace_conf (struct btrace_config *conf, const char *xml);
401
/* Dereference a branch trace instruction iterator.  Return a pointer to the
   instruction the iterator points to.
   May return NULL if the iterator points to a gap in the trace.  */
extern const struct btrace_insn *
  btrace_insn_get (const struct btrace_insn_iterator *);

/* Return the error code for a branch trace instruction iterator.  Returns zero
   if there is no error, i.e. the instruction is valid.  */
extern int btrace_insn_get_error (const struct btrace_insn_iterator *);

/* Return the instruction number for a branch trace iterator.
   Returns one past the maximum instruction number for the end iterator.  */
extern unsigned int btrace_insn_number (const struct btrace_insn_iterator *);

/* Initialize a branch trace instruction iterator to point to the begin/end of
   the branch trace.  Throws an error if there is no branch trace.  */
extern void btrace_insn_begin (struct btrace_insn_iterator *,
			       const struct btrace_thread_info *);
extern void btrace_insn_end (struct btrace_insn_iterator *,
			     const struct btrace_thread_info *);

/* Increment/decrement a branch trace instruction iterator by at most STRIDE
   instructions.  Return the number of instructions by which the instruction
   iterator has been advanced.
   Returns zero, if the operation failed or STRIDE had been zero.  */
extern unsigned int btrace_insn_next (struct btrace_insn_iterator *,
				      unsigned int stride);
extern unsigned int btrace_insn_prev (struct btrace_insn_iterator *,
				      unsigned int stride);

/* Compare two branch trace instruction iterators.
   Return a negative number if LHS < RHS.
   Return zero if LHS == RHS.
   Return a positive number if LHS > RHS.  */
extern int btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
			    const struct btrace_insn_iterator *rhs);

/* Find an instruction or gap in the function branch trace by its number.
   If the instruction is found, initialize the branch trace instruction
   iterator to point to this instruction and return non-zero.
   Return zero otherwise.  */
extern int btrace_find_insn_by_number (struct btrace_insn_iterator *,
				       const struct btrace_thread_info *,
				       unsigned int number);
446
/* Dereference a branch trace call iterator.  Return a pointer to the
   function the iterator points to or NULL if the iterator points past
   the end of the branch trace.  */
extern const struct btrace_function *
  btrace_call_get (const struct btrace_call_iterator *);

/* Return the function number for a branch trace call iterator.
   Returns one past the maximum function number for the end iterator.
   Returns zero if the iterator does not point to a valid function.  */
extern unsigned int btrace_call_number (const struct btrace_call_iterator *);

/* Initialize a branch trace call iterator to point to the begin/end of
   the branch trace.  Throws an error if there is no branch trace.  */
extern void btrace_call_begin (struct btrace_call_iterator *,
			       const struct btrace_thread_info *);
extern void btrace_call_end (struct btrace_call_iterator *,
			     const struct btrace_thread_info *);

/* Increment/decrement a branch trace call iterator by at most STRIDE function
   segments.  Return the number of function segments by which the call
   iterator has been advanced.
   Returns zero, if the operation failed or STRIDE had been zero.  */
extern unsigned int btrace_call_next (struct btrace_call_iterator *,
				      unsigned int stride);
extern unsigned int btrace_call_prev (struct btrace_call_iterator *,
				      unsigned int stride);

/* Compare two branch trace call iterators.
   Return a negative number if LHS < RHS.
   Return zero if LHS == RHS.
   Return a positive number if LHS > RHS.  */
extern int btrace_call_cmp (const struct btrace_call_iterator *lhs,
			    const struct btrace_call_iterator *rhs);

/* Find a function in the function branch trace by its NUMBER.
   If the function is found, initialize the branch trace call
   iterator to point to this function and return non-zero.
   Return zero otherwise.  */
extern int btrace_find_call_by_number (struct btrace_call_iterator *,
				       const struct btrace_thread_info *,
				       unsigned int number);
488
/* Set the branch trace instruction history from BEGIN (inclusive) to
   END (exclusive).  Used to remember the range covered by the last
   "record instruction-history" command.  */
extern void btrace_set_insn_history (struct btrace_thread_info *,
				     const struct btrace_insn_iterator *begin,
				     const struct btrace_insn_iterator *end);

/* Set the branch trace function call history from BEGIN (inclusive) to
   END (exclusive).  Used to remember the range covered by the last
   "record function-call-history" command.  */
extern void btrace_set_call_history (struct btrace_thread_info *,
				     const struct btrace_call_iterator *begin,
				     const struct btrace_call_iterator *end);

/* Determine if branch tracing is currently replaying TP.  */
extern int btrace_is_replaying (struct thread_info *tp);

/* Return non-zero if the branch trace for TP is empty; zero otherwise.  */
extern int btrace_is_empty (struct thread_info *tp);
506
507 #endif /* BTRACE_H */
This page took 0.051274 seconds and 4 git commands to generate.