/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef GDBSERVER_LINUX_LOW_H
#define GDBSERVER_LINUX_LOW_H

#include "nat/linux-nat.h"
#include "nat/gdb_thread_db.h"
#include <signal.h>

#include "gdbthread.h"
#include "gdb_proc_service.h"

/* Included for ptrace type definitions.  */
#include "nat/linux-ptrace.h"
#include "target/waitstatus.h" /* For enum target_stop_reason.  */
#include "tracepoint.h"

#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
typedef void (*regset_fill_func) (struct regcache *, void *);
typedef void (*regset_store_func) (struct regcache *, const void *);
enum regset_type {
  GENERAL_REGS,
  FP_REGS,
  EXTENDED_REGS,
  OPTIONAL_REGS, /* Do not error if the regset cannot be accessed.  */
};

/* The arch's regsets array initializer must be terminated with a NULL
   regset.  */
#define NULL_REGSET \
  { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }

struct regset_info
{
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *".  */
  int nt_type;
  int size;
  enum regset_type type;
  regset_fill_func fill_function;
  regset_store_func store_function;
};
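
/* Illustrative sketch only (not part of this interface): an
   architecture's regsets array is a table of regset_info entries
   terminated with NULL_REGSET.  The ptrace requests, size, and
   fill/store callbacks below are hypothetical placeholders.  */
#if 0
static struct regset_info hypothetical_regsets[] =
  {
    { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (struct user_regs_struct),
      GENERAL_REGS, hypothetical_fill_gregset, hypothetical_store_gregset },
    NULL_REGSET
  };
#endif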

/* Aggregation of all the supported regsets of a given
   architecture/mode.  */

struct regsets_info
{
  /* The regsets array.  */
  struct regset_info *regsets;

  /* The number of regsets in the REGSETS array.  */
  int num_regsets;

  /* If we get EIO on a regset, do not try it again.  Note the set of
     supported regsets may depend on processor mode on biarch
     machines.  This is a (lazily allocated) array holding one boolean
     byte (0/1) per regset, with each element corresponding to the
     regset in the REGSETS array above at the same offset.  */
  char *disabled_regsets;
};

#endif

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */

struct usrregs_info
{
  /* The number of registers accessible.  */
  int num_regs;

  /* The registers map.  */
  int *regmap;
};
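
/* Illustrative sketch only: a hypothetical regmap giving, for each GDB
   register number, its PTRACE_PEEKUSER offset in `struct user'.  The
   offsets and names are placeholders, not real values.  */
#if 0
static int hypothetical_regmap[] = { 0, 8, 16, 24 };

static struct usrregs_info hypothetical_usrregs_info =
  {
    4,				/* num_regs */
    hypothetical_regmap
  };
#endif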

/* All info needed to access an architecture/mode's registers.  */

struct regs_info
{
  /* Regset support bitmap: 1 for registers that are transferred as a part
     of a regset, 0 for ones that need to be handled individually.  This
     can be NULL if all registers are transferred with regsets or regsets
     are not supported.  */
  unsigned char *regset_bitmap;

  /* Info used when accessing registers with PTRACE_PEEKUSER /
     PTRACE_POKEUSER.  This can be NULL if all registers are
     transferred with regsets.  */
  struct usrregs_info *usrregs;

#ifdef HAVE_LINUX_REGSETS
  /* Info used when accessing registers with regsets.  */
  struct regsets_info *regsets_info;
#endif
};
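
/* Illustrative sketch only: how an architecture might wire the above
   together for its regs_info callback.  All names are placeholders,
   and a regsets_info member is assumed (HAVE_LINUX_REGSETS).  */
#if 0
static struct regs_info hypothetical_regs_info =
  {
    NULL,			/* regset_bitmap: regsets cover everything.  */
    &hypothetical_usrregs_info,
    &hypothetical_regsets_info
  };

static const struct regs_info *
hypothetical_get_regs_info (void)
{
  return &hypothetical_regs_info;
}
#endif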

struct process_info_private
{
  /* Arch-specific additions.  */
  struct arch_process_info *arch_private;

  /* libthread_db-specific additions.  Not NULL if this process has loaded
     thread_db, and it is active.  */
  struct thread_db *thread_db;

  /* &_r_debug.  0 if not yet determined.  -1 if no PT_DYNAMIC in Phdrs.  */
  CORE_ADDR r_debug;
};

struct lwp_info;

struct linux_target_ops
{
  const struct regs_info *(*regs_info) (void);

  /* Return 0 if we can fetch/store the register, 1 if we cannot
     fetch/store the register.  */
  int (*cannot_fetch_register) (int);
  int (*cannot_store_register) (int);

  /* Hook to fetch a register in some non-standard way.  Used for
     example by backends that have read-only registers with hardcoded
     values (e.g., IA64's gr0/fr0/fr1).  Returns true if register
     REGNO was supplied, false if not, and we should fall back to the
     standard ptrace methods.  */
  int (*fetch_register) (struct regcache *regcache, int regno);

  CORE_ADDR (*get_pc) (struct regcache *regcache);
  void (*set_pc) (struct regcache *regcache, CORE_ADDR newpc);

  /* See target.h for details.  */
  int (*breakpoint_kind_from_pc) (CORE_ADDR *pcptr);

  /* See target.h for details.  */
  const gdb_byte *(*sw_breakpoint_from_kind) (int kind, int *size);

  /* Find the next possible PCs after the current instruction executes.  */
  std::vector<CORE_ADDR> (*get_next_pcs) (struct regcache *regcache);

  int decr_pc_after_break;
  int (*breakpoint_at) (CORE_ADDR pc);

  /* Breakpoint and watchpoint related functions.  See target.h for
     comments.  */
  int (*supports_z_point_type) (char z_type);
  int (*insert_point) (enum raw_bkpt_type type, CORE_ADDR addr,
                       int size, struct raw_breakpoint *bp);
  int (*remove_point) (enum raw_bkpt_type type, CORE_ADDR addr,
                       int size, struct raw_breakpoint *bp);

  int (*stopped_by_watchpoint) (void);
  CORE_ADDR (*stopped_data_address) (void);

  /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
     for registers smaller than an xfer unit).  */
  void (*collect_ptrace_register) (struct regcache *regcache,
                                   int regno, char *buf);
  void (*supply_ptrace_register) (struct regcache *regcache,
                                  int regno, const char *buf);

  /* Hook to convert from target format to ptrace format and back.
     Returns true if any conversion was done; false otherwise.
     If DIRECTION is 1, then copy from INF to NATIVE.
     If DIRECTION is 0, copy from NATIVE to INF.  */
  int (*siginfo_fixup) (siginfo_t *native, gdb_byte *inf, int direction);

  /* Hook to call when a new process is created or attached to.
     If extra per-process architecture-specific data is needed,
     allocate it here.  */
  struct arch_process_info * (*new_process) (void);

  /* Hook to call when a process is being deleted.  If extra per-process
     architecture-specific data is needed, delete it here.  */
  void (*delete_process) (struct arch_process_info *info);

  /* Hook to call when a new thread is detected.
     If extra per-thread architecture-specific data is needed,
     allocate it here.  */
  void (*new_thread) (struct lwp_info *);

  /* Hook to call when a thread is being deleted.  If extra per-thread
     architecture-specific data is needed, delete it here.  */
  void (*delete_thread) (struct arch_lwp_info *);

  /* Hook to call, if any, when a new fork is attached.  */
  void (*new_fork) (struct process_info *parent, struct process_info *child);

  /* Hook to call prior to resuming a thread.  */
  void (*prepare_to_resume) (struct lwp_info *);

  /* Hook to support target specific qSupported.  */
  void (*process_qsupported) (char **, int count);

  /* Returns true if the low target supports tracepoints.  */
  int (*supports_tracepoints) (void);

  /* Fill ADDRP with the thread area address of LWPID.  Returns 0 on
     success, -1 on failure.  */
  int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);

  /* Install a fast tracepoint jump pad.  See target.h for
     comments.  */
  int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                           CORE_ADDR collector,
                                           CORE_ADDR lockaddr,
                                           ULONGEST orig_size,
                                           CORE_ADDR *jump_entry,
                                           CORE_ADDR *trampoline,
                                           ULONGEST *trampoline_size,
                                           unsigned char *jjump_pad_insn,
                                           ULONGEST *jjump_pad_insn_size,
                                           CORE_ADDR *adjusted_insn_addr,
                                           CORE_ADDR *adjusted_insn_addr_end,
                                           char *err);

  /* Return the bytecode operations vector for the current inferior.
     Returns NULL if bytecode compilation is not supported.  */
  struct emit_ops *(*emit_ops) (void);

  /* Return the minimum length of an instruction that can be safely overwritten
     for use as a fast tracepoint.  */
  int (*get_min_fast_tracepoint_insn_len) (void);

  /* Returns true if the low target supports range stepping.  */
  int (*supports_range_stepping) (void);

  /* See target.h.  */
  int (*breakpoint_kind_from_current_state) (CORE_ADDR *pcptr);

  /* See target.h.  */
  int (*supports_hardware_single_step) (void);

  /* Fill *SYSNO with the syscall nr trapped.  Only to be called when
     inferior is stopped due to SYSCALL_SIGTRAP.  */
  void (*get_syscall_trapinfo) (struct regcache *regcache, int *sysno);

  /* See target.h.  */
  int (*get_ipa_tdesc_idx) (void);
};

extern struct linux_target_ops the_low_target;
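
/* Illustrative sketch only: each architecture backend supplies this
   vector as a global definition; the callback names below are
   hypothetical and most fields are elided.  */
#if 0
struct linux_target_ops the_low_target =
  {
    hypothetical_get_regs_info,
    hypothetical_cannot_fetch_register,
    hypothetical_cannot_store_register,
    /* ... remaining hooks, or NULL where a hook is unsupported ...  */
  };
#endif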

/* Target ops definitions for a Linux target.  */

class linux_process_target : public process_stratum_target
{
public:

  int create_inferior (const char *program,
                       const std::vector<char *> &program_args) override;

  void post_create_inferior () override;

  int attach (unsigned long pid) override;

  int kill (process_info *proc) override;

  int detach (process_info *proc) override;

  void mourn (process_info *proc) override;

  void join (int pid) override;

  bool thread_alive (ptid_t pid) override;

  void resume (thread_resume *resume_info, size_t n) override;

  ptid_t wait (ptid_t ptid, target_waitstatus *status,
               int options) override;

  void fetch_registers (regcache *regcache, int regno) override;

  void store_registers (regcache *regcache, int regno) override;

  int prepare_to_access_memory () override;

  void done_accessing_memory () override;

  int read_memory (CORE_ADDR memaddr, unsigned char *myaddr,
                   int len) override;

  int write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
                    int len) override;

  void look_up_symbols () override;

  void request_interrupt () override;

  bool supports_read_auxv () override;

  int read_auxv (CORE_ADDR offset, unsigned char *myaddr,
                 unsigned int len) override;

  bool supports_z_point_type (char z_type) override;

  int insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                    int size, raw_breakpoint *bp) override;

  int remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                    int size, raw_breakpoint *bp) override;

  bool stopped_by_sw_breakpoint () override;

  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool supports_stopped_by_hw_breakpoint () override;

  bool supports_hardware_single_step () override;

  bool stopped_by_watchpoint () override;

  CORE_ADDR stopped_data_address () override;

  bool supports_read_offsets () override;

  int read_offsets (CORE_ADDR *text, CORE_ADDR *data) override;

  bool supports_get_tls_address () override;

  int get_tls_address (thread_info *thread, CORE_ADDR offset,
                       CORE_ADDR load_module, CORE_ADDR *address) override;

  bool supports_qxfer_osdata () override;

  int qxfer_osdata (const char *annex, unsigned char *readbuf,
                    unsigned const char *writebuf,
                    CORE_ADDR offset, int len) override;

  bool supports_qxfer_siginfo () override;

  int qxfer_siginfo (const char *annex, unsigned char *readbuf,
                     unsigned const char *writebuf,
                     CORE_ADDR offset, int len) override;

  bool supports_non_stop () override;

  bool async (bool enable) override;

  int start_non_stop (bool enable) override;

  bool supports_multi_process () override;

  bool supports_fork_events () override;

  bool supports_vfork_events () override;

  bool supports_exec_events () override;

  void handle_new_gdb_connection () override;

  int handle_monitor_command (char *mon) override;

  int core_of_thread (ptid_t ptid) override;

#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
  bool supports_read_loadmap () override;

  int read_loadmap (const char *annex, CORE_ADDR offset,
                    unsigned char *myaddr, unsigned int len) override;
#endif

  void process_qsupported (char **features, int count) override;

  bool supports_tracepoints () override;

  CORE_ADDR read_pc (regcache *regcache) override;

  void write_pc (regcache *regcache, CORE_ADDR pc) override;

  bool supports_thread_stopped () override;

  bool thread_stopped (thread_info *thread) override;

  void pause_all (bool freeze) override;

  void unpause_all (bool unfreeze) override;

  void stabilize_threads () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                        CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  bool supports_disable_randomization () override;

  bool supports_qxfer_libraries_svr4 () override;

  int qxfer_libraries_svr4 (const char *annex,
                            unsigned char *readbuf,
                            unsigned const char *writebuf,
                            CORE_ADDR offset, int len) override;

  bool supports_agent () override;

#ifdef HAVE_LINUX_BTRACE
  btrace_target_info *enable_btrace (ptid_t ptid,
                                     const btrace_config *conf) override;

  int disable_btrace (btrace_target_info *tinfo) override;

  int read_btrace (btrace_target_info *tinfo, buffer *buf,
                   enum btrace_read_type type) override;

  int read_btrace_conf (const btrace_target_info *tinfo,
                        buffer *buf) override;
#endif

  bool supports_range_stepping () override;

  bool supports_pid_to_exec_file () override;

  char *pid_to_exec_file (int pid) override;

  bool supports_multifs () override;

  int multifs_open (int pid, const char *filename, int flags,
                    mode_t mode) override;

  int multifs_unlink (int pid, const char *filename) override;

  ssize_t multifs_readlink (int pid, const char *filename, char *buf,
                            size_t bufsiz) override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const char *thread_name (ptid_t thread) override;

#if USE_THREAD_DB
  bool thread_handle (ptid_t ptid, gdb_byte **handle,
                      int *handle_len) override;
#endif

  bool supports_software_single_step () override;

  bool supports_catch_syscall () override;

  int get_ipa_tdesc_idx () override;

private:

  /* Handle a GNU/Linux extended wait response.  If we see a clone,
     fork, or vfork event, we need to add the new LWP to our list
     (and return 0 so as not to report the trap to higher layers).
     If we see an exec event, we will modify ORIG_EVENT_LWP to point
     to a new LWP representing the new program.  */
  int handle_extended_wait (lwp_info **orig_event_lwp, int wstat);

  /* Do low-level handling of the event, and check if we should go on
     and pass it to caller code.  Return the affected lwp if we
     should, or NULL otherwise.  */
  lwp_info *filter_event (int lwpid, int wstat);

  /* Wait for an event from child(ren) WAIT_PTID, and return any that
     match FILTER_PTID (leaving others pending).  The PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                               int *wstatp, int options);

  /* Wait for an event from child(ren) PTID.  PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event (ptid_t ptid, int *wstatp, int options);

  /* Wait for all children to stop for the SIGSTOPs we just queued.  */
  void wait_for_sigstop ();

  /* Wait for process, returns status.  */
  ptid_t wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
                 int target_options);

  /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
     If SUSPEND, then also increase the suspend count of every LWP,
     except EXCEPT.  */
  void stop_all_lwps (int suspend, lwp_info *except);

  /* Stopped LWPs that the client wanted to be running, that don't have
     pending statuses, are set to run again, except for EXCEPT, if not
     NULL.  This undoes a stop_all_lwps call.  */
  void unstop_all_lwps (int unsuspend, lwp_info *except);

  /* Start a step-over operation on LWP.  When LWP is stopped at a
     breakpoint, to make progress, we need to get the breakpoint out
     of the way.  If we let other threads run while we do that, they
     may pass by the breakpoint location and miss hitting it.  To
     avoid that, a step-over momentarily stops all threads while LWP
     is single-stepped by either hardware or software while the
     breakpoint is temporarily uninserted from the inferior.  When the
     single-step finishes, we reinsert the breakpoint, and let all
     threads that are supposed to be running run again.  */
  void start_step_over (lwp_info *lwp);

  /* If there's a step over in progress, wait until all threads stop
     (that is, until the stepping thread finishes its step), and
     unsuspend all lwps.  The stepping thread ends with its status
     pending, which is processed later when we get back to processing
     events.  */
  void complete_ongoing_step_over ();

  /* When we finish a step-over, set threads running again.  If there's
     another thread that may need a step-over, now's the time to start
     it.  Eventually, we'll move all threads past their breakpoints.  */
  void proceed_all_lwps ();

  /* The reason we resume in the caller is that we want to be able to
     pass lwp->status_pending as WSTAT, and we need to clear
     status_pending_p before resuming; otherwise, resume_one_lwp
     refuses to resume.  */
  bool maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat);

  /* Move THREAD out of the jump pad.  */
  void move_out_of_jump_pad (thread_info *thread);

  /* Call low_arch_setup on THREAD.  */
  void arch_setup_thread (thread_info *thread);

protected:
  /* The architecture-specific "low" methods are listed below.  */

  /* Architecture-specific setup for the current thread.  */
  virtual void low_arch_setup () = 0;
};

extern linux_process_target *the_linux_target;

#define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
#define get_lwp_thread(lwp) ((lwp)->thread)
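
/* Illustrative sketch only: converting between a thread and its LWP
   with the accessors above.  The function name is a placeholder.  */
#if 0
static void
hypothetical_example (struct thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Each lwp_info keeps a backlink to its owning thread_info.  */
  gdb_assert (get_lwp_thread (lwp) == thread);
}
#endif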

/* This struct is recorded in the target_data field of struct thread_info.

   On linux ``all_threads'' is keyed by the LWP ID, which we use as the
   GDB protocol representation of the thread ID.  Threads also have
   a "process ID" (poorly named) which is (presently) the same as the
   LWP ID.

   There is also ``all_processes'', which is keyed by the "overall
   process ID", which GNU/Linux calls tgid, "thread group ID".  */

struct lwp_info
{
  /* Backlink to the parent object.  */
  struct thread_info *thread;

  /* If this flag is set, the next SIGSTOP will be ignored (the
     process will be immediately resumed).  This means that either we
     sent the SIGSTOP to it ourselves and got some other pending event
     (so the SIGSTOP is still pending), or that we stopped the
     inferior implicitly via PTRACE_ATTACH and have not waited for it
     yet.  */
  int stop_expected;

  /* When this is true, we shall not try to resume this thread, even
     if last_resume_kind isn't resume_stop.  */
  int suspended;

  /* If this flag is set, the lwp is known to be stopped right now (stop
     event already received in a wait()).  */
  int stopped;

  /* Signal whether we are in a SYSCALL_ENTRY or
     in a SYSCALL_RETURN event.
     Values:
     - TARGET_WAITKIND_SYSCALL_ENTRY
     - TARGET_WAITKIND_SYSCALL_RETURN */
  enum target_waitkind syscall_state;

  /* When stopped is set, the last wait status recorded for this lwp.  */
  int last_status;

  /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
     this LWP's last event, to pass to GDB without any further
     processing.  This is used to store extended ptrace event
     information or exit status until it can be reported to GDB.  */
  struct target_waitstatus waitstatus;

  /* A pointer to the fork child/parent relative.  Valid only while
     the parent fork event is not reported to higher layers.  Used to
     avoid wildcard vCont actions resuming a fork child before GDB is
     notified about the parent's fork event.  */
  struct lwp_info *fork_relative;

  /* When stopped is set, this is where the lwp last stopped, with
     decr_pc_after_break already accounted for.  If the LWP is
     running, this is the address at which the lwp was resumed.  */
  CORE_ADDR stop_pc;

  /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
     been reported.  */
  int status_pending_p;
  int status_pending;

  /* The reason the LWP last stopped, if we need to track it
     (breakpoint, watchpoint, etc.)  */
  enum target_stop_reason stop_reason;

  /* On architectures where it is possible to know the data address of
     a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
     contains that data address.  Only valid if STOPPED_BY_WATCHPOINT
     is true.  */
  CORE_ADDR stopped_data_address;

  /* If this is non-zero, it is a breakpoint to be reinserted at our next
     stop (SIGTRAP stops only).  */
  CORE_ADDR bp_reinsert;

  /* If this flag is set, the last continue operation at the ptrace
     level on this process was a single-step.  */
  int stepping;

  /* Range to single step within.  This is a copy of the step range
     passed along the last resume request.  See 'struct
     thread_resume'.  */
  CORE_ADDR step_range_start;	/* Inclusive */
  CORE_ADDR step_range_end;	/* Exclusive */

  /* If this flag is set, we need to set the event request flags the
     next time we see this LWP stop.  */
  int must_set_ptrace_flags;

  /* If this is non-zero, it points to a chain of signals which need to
     be delivered to this process.  */
  struct pending_signals *pending_signals;

  /* A link used when resuming.  It is initialized from the resume request,
     and then processed and cleared in linux_resume_one_lwp.  */
  struct thread_resume *resume;

  /* Information about this lwp's fast tracepoint collection status (is it
     currently stopped in the jump pad, and if so, before or at/after the
     relocated instruction).  Normally, we won't care about this, but we will
     if a signal arrives to this lwp while it is collecting.  */
  fast_tpoint_collect_result collecting_fast_tracepoint;

  /* If this is non-zero, it points to a chain of signals which need
     to be reported to GDB.  These were deferred because the thread
     was doing a fast tracepoint collect when they arrived.  */
  struct pending_signals *pending_signals_to_report;

  /* When collecting_fast_tracepoint is first found to be 1, we insert
     an exit-jump-pad-quickly breakpoint.  This is it.  */
  struct breakpoint *exit_jump_pad_bkpt;

#ifdef USE_THREAD_DB
  int thread_known;
  /* The thread handle, used for e.g. TLS access.  Only valid if
     THREAD_KNOWN is set.  */
  td_thrhandle_t th;

  /* The pthread_t handle.  */
  thread_t thread_handle;
#endif

  /* Arch-specific additions.  */
  struct arch_lwp_info *arch_private;
};

int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);

/* Attach to PTID.  Returns 0 on success, non-zero otherwise (an
   errno).  */
int linux_attach_lwp (ptid_t ptid);

struct lwp_info *find_lwp_pid (ptid_t ptid);
/* For linux_stop_lwp see nat/linux-nat.h.  */

#ifdef HAVE_LINUX_REGSETS
void initialize_regsets_info (struct regsets_info *regsets_info);
#endif

void initialize_low_arch (void);

void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);

void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);

/* From thread-db.c  */
int thread_db_init (void);
void thread_db_detach (struct process_info *);
void thread_db_mourn (struct process_info *);
int thread_db_handle_monitor_command (char *);
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
                               CORE_ADDR load_module, CORE_ADDR *address);
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);

/* Called from linux-low.c when a clone event is detected.  Upon entry,
   both the clone and the parent should be stopped.  This function does
   whatever is required to have the clone under thread_db's control.  */

void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);

bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);

extern int have_ptrace_getregset;

/* Search for the value with type MATCH in the auxv vector with
   entries of length WORDSIZE bytes.  If found, store the value in
   *VALP and return 1.  If not found or if there is an error, return
   0.  */

int linux_get_auxv (int wordsize, CORE_ADDR match,
                    CORE_ADDR *valp);

/* Fetch the AT_HWCAP entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap (int wordsize);

/* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap2 (int wordsize);
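
/* Illustrative sketch only: looking up auxv entries with the helpers
   above, assuming a 64-bit inferior (WORDSIZE of 8).  The function
   name is a placeholder; AT_PHDR comes from <elf.h>.  */
#if 0
static void
hypothetical_check_auxv (void)
{
  CORE_ADDR phdr;

  /* Generic lookup: returns 1 and fills PHDR if AT_PHDR is present.  */
  if (linux_get_auxv (8, AT_PHDR, &phdr))
    {
      /* ... use PHDR ...  */
    }

  /* Convenience wrapper for AT_HWCAP; zero means no entry was found.  */
  CORE_ADDR hwcap = linux_get_hwcap (8);
  /* Test architecture-specific HWCAP bits in HWCAP here.  */
}
#endif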

#endif /* GDBSERVER_LINUX_LOW_H */