/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef GDBSERVER_LINUX_LOW_H
#define GDBSERVER_LINUX_LOW_H

#include "nat/linux-nat.h"
#include "nat/gdb_thread_db.h"
#include <signal.h>

#include "gdbthread.h"
#include "gdb_proc_service.h"

/* Included for ptrace type definitions.  */
#include "nat/linux-ptrace.h"
#include "target/waitstatus.h" /* For enum target_stop_reason.  */
#include "tracepoint.h"

#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
typedef void (*regset_fill_func) (struct regcache *, void *);
typedef void (*regset_store_func) (struct regcache *, const void *);
enum regset_type {
  GENERAL_REGS,
  FP_REGS,
  EXTENDED_REGS,
  OPTIONAL_REGS, /* Do not error if the regset cannot be accessed.  */
};

/* The arch's regsets array initializer must be terminated with a NULL
   regset.  */
#define NULL_REGSET \
  { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }

struct regset_info
{
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *".  */
  int nt_type;
  int size;
  enum regset_type type;
  regset_fill_func fill_function;
  regset_store_func store_function;
};
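
/* Illustrative sketch (not part of the original header): a hypothetical
   architecture's regsets array, terminated with NULL_REGSET as required
   above.  The "frob" fill/store callbacks are made-up names, and the
   exact ptrace requests, note types and sizes depend on the architecture.

     static struct regset_info frob_regsets[] = {
       { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
         GENERAL_REGS, frob_fill_gregset, frob_store_gregset },
       { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
         sizeof (elf_fpregset_t), FP_REGS,
         frob_fill_fpregset, frob_store_fpregset },
       NULL_REGSET
     };
*/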

/* Aggregation of all the supported regsets of a given
   architecture/mode.  */

struct regsets_info
{
  /* The regsets array.  */
  struct regset_info *regsets;

  /* The number of regsets in the REGSETS array.  */
  int num_regsets;

  /* If we get EIO on a regset, do not try it again.  Note the set of
     supported regsets may depend on processor mode on biarch
     machines.  This is a (lazily allocated) array holding one boolean
     byte (0/1) per regset, with each element corresponding to the
     regset in the REGSETS array above at the same offset.  */
  char *disabled_regsets;
};

#endif

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */

struct usrregs_info
{
  /* The number of registers accessible.  */
  int num_regs;

  /* The registers map.  */
  int *regmap;
};

/* All info needed to access an architecture/mode's registers.  */

struct regs_info
{
  /* Regset support bitmap: 1 for registers that are transferred as a part
     of a regset, 0 for ones that need to be handled individually.  This
     can be NULL if all registers are transferred with regsets or regsets
     are not supported.  */
  unsigned char *regset_bitmap;

  /* Info used when accessing registers with PTRACE_PEEKUSER /
     PTRACE_POKEUSER.  This can be NULL if all registers are
     transferred with regsets.  */
  struct usrregs_info *usrregs;

#ifdef HAVE_LINUX_REGSETS
  /* Info used when accessing registers with regsets.  */
  struct regsets_info *regsets_info;
#endif
};
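
/* Illustrative sketch (not part of the original header): an architecture
   typically aggregates the structures above into a single regs_info that
   its get_regs_info method returns.  All "frob" names are hypothetical,
   and the regsets_info member only exists when HAVE_LINUX_REGSETS is
   defined.

     static struct usrregs_info frob_usrregs_info =
       {
         frob_num_regs,   /* num_regs */
         frob_regmap,     /* regmap */
       };

     static struct regsets_info frob_regsets_info =
       {
         frob_regsets,    /* regsets */
         0,               /* num_regsets, filled in by initialize_regsets_info */
         NULL,            /* disabled_regsets */
       };

     static struct regs_info frob_regs_info =
       {
         NULL,            /* regset_bitmap */
         &frob_usrregs_info,
         &frob_regsets_info
       };
*/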

struct process_info_private
{
  /* Arch-specific additions.  */
  struct arch_process_info *arch_private;

  /* libthread_db-specific additions.  Not NULL if this process has loaded
     thread_db, and it is active.  */
  struct thread_db *thread_db;

  /* &_r_debug.  0 if not yet determined.  -1 if no PT_DYNAMIC in Phdrs.  */
  CORE_ADDR r_debug;
};

struct lwp_info;

struct linux_target_ops
{
  /* Fill *SYSNO with the syscall nr trapped.  Only to be called when
     inferior is stopped due to SYSCALL_SIGTRAP.  */
  void (*get_syscall_trapinfo) (struct regcache *regcache, int *sysno);

  /* See target.h.  */
  int (*get_ipa_tdesc_idx) (void);
};

extern struct linux_target_ops the_low_target;

/* Target ops definitions for a Linux target.  */

class linux_process_target : public process_stratum_target
{
public:

  int create_inferior (const char *program,
                       const std::vector<char *> &program_args) override;

  void post_create_inferior () override;

  int attach (unsigned long pid) override;

  int kill (process_info *proc) override;

  int detach (process_info *proc) override;

  void mourn (process_info *proc) override;

  void join (int pid) override;

  bool thread_alive (ptid_t pid) override;

  void resume (thread_resume *resume_info, size_t n) override;

  ptid_t wait (ptid_t ptid, target_waitstatus *status,
               int options) override;

  void fetch_registers (regcache *regcache, int regno) override;

  void store_registers (regcache *regcache, int regno) override;

  int prepare_to_access_memory () override;

  void done_accessing_memory () override;

  int read_memory (CORE_ADDR memaddr, unsigned char *myaddr,
                   int len) override;

  int write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
                    int len) override;

  void look_up_symbols () override;

  void request_interrupt () override;

  bool supports_read_auxv () override;

  int read_auxv (CORE_ADDR offset, unsigned char *myaddr,
                 unsigned int len) override;

  int insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                    int size, raw_breakpoint *bp) override;

  int remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                    int size, raw_breakpoint *bp) override;

  bool stopped_by_sw_breakpoint () override;

  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool supports_stopped_by_hw_breakpoint () override;

  bool supports_hardware_single_step () override;

  bool stopped_by_watchpoint () override;

  CORE_ADDR stopped_data_address () override;

  bool supports_read_offsets () override;

  int read_offsets (CORE_ADDR *text, CORE_ADDR *data) override;

  bool supports_get_tls_address () override;

  int get_tls_address (thread_info *thread, CORE_ADDR offset,
                       CORE_ADDR load_module, CORE_ADDR *address) override;

  bool supports_qxfer_osdata () override;

  int qxfer_osdata (const char *annex, unsigned char *readbuf,
                    unsigned const char *writebuf,
                    CORE_ADDR offset, int len) override;

  bool supports_qxfer_siginfo () override;

  int qxfer_siginfo (const char *annex, unsigned char *readbuf,
                     unsigned const char *writebuf,
                     CORE_ADDR offset, int len) override;

  bool supports_non_stop () override;

  bool async (bool enable) override;

  int start_non_stop (bool enable) override;

  bool supports_multi_process () override;

  bool supports_fork_events () override;

  bool supports_vfork_events () override;

  bool supports_exec_events () override;

  void handle_new_gdb_connection () override;

  int handle_monitor_command (char *mon) override;

  int core_of_thread (ptid_t ptid) override;

#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
  bool supports_read_loadmap () override;

  int read_loadmap (const char *annex, CORE_ADDR offset,
                    unsigned char *myaddr, unsigned int len) override;
#endif

  CORE_ADDR read_pc (regcache *regcache) override;

  void write_pc (regcache *regcache, CORE_ADDR pc) override;

  bool supports_thread_stopped () override;

  bool thread_stopped (thread_info *thread) override;

  void pause_all (bool freeze) override;

  void unpause_all (bool unfreeze) override;

  void stabilize_threads () override;

  bool supports_disable_randomization () override;

  bool supports_qxfer_libraries_svr4 () override;

  int qxfer_libraries_svr4 (const char *annex,
                            unsigned char *readbuf,
                            unsigned const char *writebuf,
                            CORE_ADDR offset, int len) override;

  bool supports_agent () override;

#ifdef HAVE_LINUX_BTRACE
  btrace_target_info *enable_btrace (ptid_t ptid,
                                     const btrace_config *conf) override;

  int disable_btrace (btrace_target_info *tinfo) override;

  int read_btrace (btrace_target_info *tinfo, buffer *buf,
                   enum btrace_read_type type) override;

  int read_btrace_conf (const btrace_target_info *tinfo,
                        buffer *buf) override;
#endif

  bool supports_range_stepping () override;

  bool supports_pid_to_exec_file () override;

  char *pid_to_exec_file (int pid) override;

  bool supports_multifs () override;

  int multifs_open (int pid, const char *filename, int flags,
                    mode_t mode) override;

  int multifs_unlink (int pid, const char *filename) override;

  ssize_t multifs_readlink (int pid, const char *filename, char *buf,
                            size_t bufsiz) override;

  const char *thread_name (ptid_t thread) override;

#if USE_THREAD_DB
  bool thread_handle (ptid_t ptid, gdb_byte **handle,
                      int *handle_len) override;
#endif

  bool supports_catch_syscall () override;

  int get_ipa_tdesc_idx () override;

  /* Return the information to access registers.  This has public
     visibility because proc-service uses it.  */
  virtual const regs_info *get_regs_info () = 0;

private:

  /* Handle a GNU/Linux extended wait response.  If we see a clone,
     fork, or vfork event, we need to add the new LWP to our list
     (and return 0 so as not to report the trap to higher layers).
     If we see an exec event, we will modify ORIG_EVENT_LWP to point
     to a new LWP representing the new program.  */
  int handle_extended_wait (lwp_info **orig_event_lwp, int wstat);

  /* Do low-level handling of the event, and check if we should go on
     and pass it to caller code.  Return the affected lwp if we are, or
     NULL otherwise.  */
  lwp_info *filter_event (int lwpid, int wstat);

  /* Wait for an event from child(ren) WAIT_PTID, and return any that
     match FILTER_PTID (leaving others pending).  The PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                               int *wstatp, int options);

  /* Wait for an event from child(ren) PTID.  PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event (ptid_t ptid, int *wstatp, int options);

  /* Wait for all children to stop for the SIGSTOPs we just queued.  */
  void wait_for_sigstop ();

  /* Wait for process, returns status.  */
  ptid_t wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
                 int target_options);

  /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
     If SUSPEND, then also increase the suspend count of every LWP,
     except EXCEPT.  */
  void stop_all_lwps (int suspend, lwp_info *except);

  /* Stopped LWPs that the client wanted to be running, that don't have
     pending statuses, are set to run again, except for EXCEPT, if not
     NULL.  This undoes a stop_all_lwps call.  */
  void unstop_all_lwps (int unsuspend, lwp_info *except);

  /* Start a step-over operation on LWP.  When LWP stopped at a
     breakpoint, to make progress, we need to move the breakpoint out
     of the way.  If we let other threads run while we do that, they may
     pass by the breakpoint location and miss hitting it.  To avoid
     that, a step-over momentarily stops all threads while LWP is
     single-stepped by either hardware or software while the breakpoint
     is temporarily uninserted from the inferior.  When the single-step
     finishes, we reinsert the breakpoint, and let all threads that are
     supposed to be running, run again.  */
  void start_step_over (lwp_info *lwp);

  /* If there's a step over in progress, wait until all threads stop
     (that is, until the stepping thread finishes its step), and
     unsuspend all lwps.  The stepping thread ends with its status
     pending, which is processed later when we get back to processing
     events.  */
  void complete_ongoing_step_over ();

  /* Finish a step-over.  Reinsert the breakpoint we had uninserted in
     start_step_over, if still there, and delete any single-step
     breakpoints we've set, on non hardware single-step targets.
     Return true if step over finished.  */
  bool finish_step_over (lwp_info *lwp);

  /* When we finish a step-over, set threads running again.  If there's
     another thread that may need a step-over, now's the time to start
     it.  Eventually, we'll move all threads past their breakpoints.  */
  void proceed_all_lwps ();

  /* The reason we resume in the caller is that we want to be able
     to pass lwp->status_pending as WSTAT, and we need to clear
     status_pending_p before resuming; otherwise, resume_one_lwp
     refuses to resume.  */
  bool maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat);

  /* Move THREAD out of the jump pad.  */
  void move_out_of_jump_pad (thread_info *thread);

  /* Call low_arch_setup on THREAD.  */
  void arch_setup_thread (thread_info *thread);

#ifdef HAVE_LINUX_USRREGS
  /* Fetch one register.  */
  void fetch_register (const usrregs_info *usrregs, regcache *regcache,
                       int regno);

  /* Store one register.  */
  void store_register (const usrregs_info *usrregs, regcache *regcache,
                       int regno);
#endif

  /* Fetch all registers, or just one, from the child process.
     If REGNO is -1, do this for all registers, skipping any that are
     assumed to have been retrieved by regsets_fetch_inferior_registers,
     unless ALL is non-zero.
     Otherwise, REGNO specifies which register (so we can save time).  */
  void usr_fetch_inferior_registers (const regs_info *regs_info,
                                     regcache *regcache, int regno, int all);

  /* Store our register values back into the inferior.
     If REGNO is -1, do this for all registers, skipping any that are
     assumed to have been saved by regsets_store_inferior_registers,
     unless ALL is non-zero.
     Otherwise, REGNO specifies which register (so we can save time).  */
  void usr_store_inferior_registers (const regs_info *regs_info,
                                     regcache *regcache, int regno, int all);

  /* Return the PC as read from the regcache of LWP, without any
     adjustment.  */
  CORE_ADDR get_pc (lwp_info *lwp);

  /* Called when the LWP stopped for a signal/trap.  If it stopped for a
     trap check what caused it (breakpoint, watchpoint, trace, etc.),
     and save the result in the LWP's stop_reason field.  If it stopped
     for a breakpoint, decrement the PC if necessary on the lwp's
     architecture.  Returns true if we now have the LWP's stop PC.  */
  bool save_stop_reason (lwp_info *lwp);

  /* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
     SIGNAL is nonzero, give it that signal.  */
  void resume_one_lwp_throw (lwp_info *lwp, int step, int signal,
                             siginfo_t *info);

  /* Like resume_one_lwp_throw, but no error is thrown if the LWP
     disappears while we try to resume it.  */
  void resume_one_lwp (lwp_info *lwp, int step, int signal, siginfo_t *info);

  /* This function is called once per thread.  We check the thread's
     last resume request, which will tell us whether to resume, step, or
     leave the thread stopped.  Any signal the client requested to be
     delivered has already been enqueued at this point.

     If any thread that GDB wants running is stopped at an internal
     breakpoint that needs stepping over, we start a step-over operation
     on that particular thread, and leave all others stopped.  */
  void proceed_one_lwp (thread_info *thread, lwp_info *except);

  /* This function is called once per thread.  We check the thread's
     resume request, which will tell us whether to resume, step, or
     leave the thread stopped; and what signal, if any, it should be
     sent.

     For threads which we aren't explicitly told otherwise, we preserve
     the stepping flag; this is used for stepping over gdbserver-placed
     breakpoints.

     If pending_flags was set in any thread, we queue any needed
     signals, since we won't actually resume.  We already have a pending
     event to report, so we don't need to preserve any step requests;
     they should be re-issued if necessary.  */
  void resume_one_thread (thread_info *thread, bool leave_all_stopped);

  /* Return true if this lwp has an interesting status pending.  */
  bool status_pending_p_callback (thread_info *thread, ptid_t ptid);

  /* Resume LWPs that are currently stopped without any pending status
     to report, but are resumed from the core's perspective.  */
  void resume_stopped_resumed_lwps (thread_info *thread);

  /* Unsuspend THREAD, except EXCEPT, and proceed.  */
  void unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except);

  /* Return true if this lwp still has an interesting status pending.
     If not (e.g., it had stopped for a breakpoint that is gone), return
     false.  */
  bool thread_still_has_status_pending (thread_info *thread);

  /* Return true if this lwp is to-be-resumed and has an interesting
     status pending.  */
  bool resume_status_pending (thread_info *thread);

  /* Return true if this lwp that GDB wants running is stopped at an
     internal breakpoint that we need to step over.  It assumes that
     any required STOP_PC adjustment has already been propagated to
     the inferior's regcache.  */
  bool thread_needs_step_over (thread_info *thread);

  /* Single step via hardware or software single step.
     Return 1 if hardware single stepping, 0 if software single stepping
     or can't single step.  */
  int single_step (lwp_info* lwp);

  /* Return true if THREAD is doing hardware single step.  */
  bool maybe_hw_step (thread_info *thread);

  /* Install breakpoints for software single stepping.  */
  void install_software_single_step_breakpoints (lwp_info *lwp);

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the watchpoint
     and reading back which data address trapped, the user may change
     the set of watchpoints, and, as a consequence, GDB changes the
     debug registers in the inferior.  To avoid reading back a stale
     stopped-data-address when that happens, we cache in LP the fact
     that a watchpoint trapped, and the corresponding data address, as
     soon as we see CHILD stop with a SIGTRAP.  If GDB changes the debug
     registers meanwhile, we have the cached data we can rely on.  */
  bool check_stopped_by_watchpoint (lwp_info *child);

  /* Convert a native/host siginfo object, into/from the siginfo in the
     layout of the inferior's architecture.  */
  void siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo,
                      int direction);

  /* Add a process to the common process list, and set its private
     data.  */
  process_info *add_linux_process (int pid, int attached);

  /* Add a new thread.  */
  lwp_info *add_lwp (ptid_t ptid);

  /* Delete a thread.  */
  void delete_lwp (lwp_info *lwp);

public: /* Make this public because it's used from outside.  */
  /* Attach to an inferior process.  Returns 0 on success, ERRNO on
     error.  */
  int attach_lwp (ptid_t ptid);

private: /* Back to private.  */
  /* Detach from LWP.  */
  void detach_one_lwp (lwp_info *lwp);

  /* Detect zombie thread group leaders, and "exit" them.  We can't
     reap their exits until all other threads in the group have
     exited.  */
  void check_zombie_leaders ();

  /* Convenience function that is called when the kernel reports an exit
     event.  This decides whether to report the event to GDB as a
     process exit event, a thread exit event, or to suppress the
     event.  */
  ptid_t filter_exit_event (lwp_info *event_child,
                            target_waitstatus *ourstatus);

  /* Returns true if THREAD is stopped in a jump pad, and we can't
     move it out, because we need to report the stop event to GDB.  For
     example, if the user puts a breakpoint in the jump pad, it's
     because she wants to debug it.  */
  bool stuck_in_jump_pad (thread_info *thread);

  /* Convenience wrapper.  Returns information about LWP's fast tracepoint
     collection status.  */
  fast_tpoint_collect_result linux_fast_tracepoint_collecting
    (lwp_info *lwp, fast_tpoint_collect_status *status);

protected:
  /* The architecture-specific "low" methods are listed below.  */

  /* Architecture-specific setup for the current thread.  */
  virtual void low_arch_setup () = 0;

  /* Return false if we can fetch/store the register, true if we cannot
     fetch/store the register.  */
  virtual bool low_cannot_fetch_register (int regno) = 0;

  virtual bool low_cannot_store_register (int regno) = 0;

  /* Hook to fetch a register in some non-standard way.  Used for
     example by backends that have read-only registers with hardcoded
     values (e.g., IA64's gr0/fr0/fr1).  Returns true if register
     REGNO was supplied, false if not, and we should fallback to the
     standard ptrace methods.  */
  virtual bool low_fetch_register (regcache *regcache, int regno);

  /* Return true if breakpoints are supported.  Such targets must
     implement the GET_PC and SET_PC methods.  */
  virtual bool low_supports_breakpoints ();

  virtual CORE_ADDR low_get_pc (regcache *regcache);

  virtual void low_set_pc (regcache *regcache, CORE_ADDR newpc);

  /* Find the next possible PCs after the current instruction executes.
     Targets that override this method should also override
     'supports_software_single_step' to return true.  */
  virtual std::vector<CORE_ADDR> low_get_next_pcs (regcache *regcache);

  /* Return true if there is a breakpoint at PC.  */
  virtual bool low_breakpoint_at (CORE_ADDR pc) = 0;

  /* Breakpoint and watchpoint related functions.  See target.h for
     comments.  */
  virtual int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
                                int size, raw_breakpoint *bp);

  virtual int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
                                int size, raw_breakpoint *bp);

  virtual bool low_stopped_by_watchpoint ();

  virtual CORE_ADDR low_stopped_data_address ();

  /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
     for registers smaller than an xfer unit).  */
  virtual void low_collect_ptrace_register (regcache *regcache, int regno,
                                            char *buf);

  virtual void low_supply_ptrace_register (regcache *regcache, int regno,
                                           const char *buf);

  /* Hook to convert from target format to ptrace format and back.
     Returns true if any conversion was done; false otherwise.
     If DIRECTION is 1, then copy from INF to NATIVE.
     If DIRECTION is 0, copy from NATIVE to INF.  */
  virtual bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
                                  int direction);

  /* Hook to call when a new process is created or attached to.
     If extra per-process architecture-specific data is needed,
     allocate it here.  */
  virtual arch_process_info *low_new_process ();

  /* Hook to call when a process is being deleted.  If extra per-process
     architecture-specific data is needed, delete it here.  */
  virtual void low_delete_process (arch_process_info *info);

  /* Hook to call when a new thread is detected.
     If extra per-thread architecture-specific data is needed,
     allocate it here.  */
  virtual void low_new_thread (lwp_info *);

  /* Hook to call when a thread is being deleted.  If extra per-thread
     architecture-specific data is needed, delete it here.  */
  virtual void low_delete_thread (arch_lwp_info *);

  /* Hook to call, if any, when a new fork is attached.  */
  virtual void low_new_fork (process_info *parent, process_info *child);

  /* Hook to call prior to resuming a thread.  */
  virtual void low_prepare_to_resume (lwp_info *lwp);

  /* Fill ADDRP with the thread area address of LWPID.  Returns 0 on
     success, -1 on failure.  */
  virtual int low_get_thread_area (int lwpid, CORE_ADDR *addrp);

  /* Returns true if the low target supports range stepping.  */
  virtual bool low_supports_range_stepping ();

  /* How many bytes the PC should be decremented after a break.  */
  virtual int low_decr_pc_after_break ();
};
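
/* Illustrative sketch (not part of the original header): a minimal
   architecture-specific target derives from linux_process_target and
   implements at least the pure virtual methods declared above.  The
   "frob" class and helper functions below are hypothetical names.

     class frob_target : public linux_process_target
     {
     public:
       const regs_info *get_regs_info () override
       { return &frob_regs_info; }

     protected:
       void low_arch_setup () override
       { frob_arch_setup (); /+ e.g., pick a target description.  +/ }

       bool low_cannot_fetch_register (int regno) override
       { return regno >= frob_num_regs; }

       bool low_cannot_store_register (int regno) override
       { return regno >= frob_num_regs; }

       bool low_breakpoint_at (CORE_ADDR pc) override
       { return frob_memory_holds_breakpoint (pc); }
     };

   The architecture file then typically defines the_linux_target as a
   pointer to a static instance:

     static frob_target the_frob_target;
     linux_process_target *the_linux_target = &the_frob_target;

   (In the sketch above, "/+ ... +/" stands in for a nested comment,
   which C++ does not allow inside this block comment.)  */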

extern linux_process_target *the_linux_target;

#define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
#define get_lwp_thread(lwp) ((lwp)->thread)
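
/* For example (illustrative only), given a thread_info one can reach the
   Linux-specific per-thread data and get back to the owning thread:

     struct lwp_info *lwp = get_thread_lwp (thread);
     struct thread_info *same_thread = get_lwp_thread (lwp);
*/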

/* This struct is recorded in the target_data field of struct thread_info.

   On linux ``all_threads'' is keyed by the LWP ID, which we use as the
   GDB protocol representation of the thread ID.  Threads also have
   a "process ID" (poorly named) which is (presently) the same as the
   LWP ID.

   There is also ``all_processes'', which is keyed by the "overall process ID",
   which GNU/Linux calls tgid, "thread group ID".  */

struct lwp_info
{
  /* Backlink to the parent object.  */
  struct thread_info *thread;

  /* If this flag is set, the next SIGSTOP will be ignored (the
     process will be immediately resumed).  This means that either we
     sent the SIGSTOP to it ourselves and got some other pending event
     (so the SIGSTOP is still pending), or that we stopped the
     inferior implicitly via PTRACE_ATTACH and have not waited for it
     yet.  */
  int stop_expected;

  /* When this is true, we shall not try to resume this thread, even
     if last_resume_kind isn't resume_stop.  */
  int suspended;

  /* If this flag is set, the lwp is known to be stopped right now (stop
     event already received in a wait()).  */
  int stopped;

  /* Signal whether we are in a SYSCALL_ENTRY or
     in a SYSCALL_RETURN event.
     Values:
     - TARGET_WAITKIND_SYSCALL_ENTRY
     - TARGET_WAITKIND_SYSCALL_RETURN */
  enum target_waitkind syscall_state;

  /* When stopped is set, the last wait status recorded for this lwp.  */
  int last_status;

  /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
     this LWP's last event, to pass to GDB without any further
     processing.  This is used to store extended ptrace event
     information or exit status until it can be reported to GDB.  */
  struct target_waitstatus waitstatus;

  /* A pointer to the fork child/parent relative.  Valid only while
     the parent fork event is not reported to higher layers.  Used to
     avoid wildcard vCont actions resuming a fork child before GDB is
     notified about the parent's fork event.  */
  struct lwp_info *fork_relative;

  /* When stopped is set, this is where the lwp last stopped, with
     decr_pc_after_break already accounted for.  If the LWP is
     running, this is the address at which the lwp was resumed.  */
  CORE_ADDR stop_pc;

  /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
     been reported.  */
  int status_pending_p;
  int status_pending;

  /* The reason the LWP last stopped, if we need to track it
     (breakpoint, watchpoint, etc.)  */
  enum target_stop_reason stop_reason;

  /* On architectures where it is possible to know the data address of
     a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
     contains such data address.  Only valid if STOPPED_BY_WATCHPOINT
     is true.  */
  CORE_ADDR stopped_data_address;

  /* If this is non-zero, it is a breakpoint to be reinserted at our next
     stop (SIGTRAP stops only).  */
  CORE_ADDR bp_reinsert;

  /* If this flag is set, the last continue operation at the ptrace
     level on this process was a single-step.  */
  int stepping;

  /* Range to single step within.  This is a copy of the step range
     passed along the last resume request.  See 'struct
     thread_resume'.  */
  CORE_ADDR step_range_start;  /* Inclusive */
  CORE_ADDR step_range_end;    /* Exclusive */

  /* If this flag is set, we need to set the event request flags the
     next time we see this LWP stop.  */
  int must_set_ptrace_flags;

  /* If this is non-zero, it points to a chain of signals which need to
     be delivered to this process.  */
  struct pending_signals *pending_signals;

  /* A link used when resuming.  It is initialized from the resume request,
     and then processed and cleared in linux_resume_one_lwp.  */
  struct thread_resume *resume;

  /* Information about this lwp's fast tracepoint collection status (is it
     currently stopped in the jump pad, and if so, before or at/after the
     relocated instruction).  Normally, we won't care about this, but we will
     if a signal arrives to this lwp while it is collecting.  */
  fast_tpoint_collect_result collecting_fast_tracepoint;

  /* If this is non-zero, it points to a chain of signals which need
     to be reported to GDB.  These were deferred because the thread
     was doing a fast tracepoint collect when they arrived.  */
  struct pending_signals *pending_signals_to_report;

  /* When collecting_fast_tracepoint is first found to be 1, we insert
     an exit-jump-pad-quickly breakpoint.  This is it.  */
  struct breakpoint *exit_jump_pad_bkpt;

#ifdef USE_THREAD_DB
  int thread_known;
  /* The thread handle, used for e.g. TLS access.  Only valid if
     THREAD_KNOWN is set.  */
  td_thrhandle_t th;

  /* The pthread_t handle.  */
  thread_t thread_handle;
#endif

  /* Arch-specific additions.  */
  struct arch_lwp_info *arch_private;
};

int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);

/* Attach to PTID.  Returns 0 on success, non-zero otherwise (an
   errno).  */
int linux_attach_lwp (ptid_t ptid);

struct lwp_info *find_lwp_pid (ptid_t ptid);
/* For linux_stop_lwp see nat/linux-nat.h.  */

#ifdef HAVE_LINUX_REGSETS
void initialize_regsets_info (struct regsets_info *regsets_info);
#endif

void initialize_low_arch (void);

void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);

void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);

/* From thread-db.c  */
int thread_db_init (void);
void thread_db_detach (struct process_info *);
void thread_db_mourn (struct process_info *);
int thread_db_handle_monitor_command (char *);
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
                               CORE_ADDR load_module, CORE_ADDR *address);
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);

/* Called from linux-low.c when a clone event is detected.  Upon entry,
   both the clone and the parent should be stopped.  This function does
   whatever is required to have the clone under thread_db's control.  */

void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);

bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);

extern int have_ptrace_getregset;

/* Search for the value with type MATCH in the auxv vector with
   entries of length WORDSIZE bytes.  If found, store the value in
   *VALP and return 1.  If not found or if there is an error, return
   0.  */

int linux_get_auxv (int wordsize, CORE_ADDR match,
                    CORE_ADDR *valp);

/* Fetch the AT_HWCAP entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap (int wordsize);

/* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap2 (int wordsize);
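
/* Illustrative sketch (not part of the original header): an architecture's
   low-level code might use the helpers above roughly like this, where
   HWCAP_FROB stands for an architecture-specific bit such as those in
   <asm/hwcap.h>, and the wordsize could be derived from
   linux_pid_exe_is_elf_64_file above:

     int wordsize = is_elf64 ? 8 : 4;

     CORE_ADDR hwcap = linux_get_hwcap (wordsize);
     bool have_frob = (hwcap & HWCAP_FROB) != 0;

     CORE_ADDR phdr = 0;
     if (linux_get_auxv (wordsize, AT_PHDR, &phdr))
       {
         ... phdr now holds the AT_PHDR auxv value ...
       }
*/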

#endif /* GDBSERVER_LINUX_LOW_H */