1 /* Internal interfaces for the GNU/Linux specific target code for gdbserver.
2 Copyright (C) 2002-2014 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19 #include "gdb_thread_db.h"
22 #include "gdbthread.h"
23 #include "gdb_proc_service.h"
25 /* Included for ptrace type definitions. */
26 #include "linux-ptrace.h"
/* Unit of data moved per ptrace peek/poke transfer -- presumably the
   type of a PTRACE_PEEKTEXT/POKETEXT word; confirm against the users
   in linux-low.c.  */
28 #define PTRACE_XFER_TYPE long
30 #ifdef HAVE_LINUX_REGSETS
/* Writes a regcache's contents out into a raw regset buffer (the
   writable `void *' argument), suitable for handing to ptrace.  */
31 typedef void (*regset_fill_func
) (struct regcache
*, void *);
/* Reads a raw regset buffer (the read-only `const void *' argument)
   back into a regcache.  */
32 typedef void (*regset_store_func
) (struct regcache
*, const void *);
/* ptrace request numbers used to fetch and to set this regset --
   presumably e.g. PTRACE_GETREGS/PTRACE_SETREGS pairs; confirm
   against the per-arch regset tables.  */
41 int get_request
, set_request
;
42 /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
43 argument and the 4th argument should be "const struct iovec *". */
/* Discriminates what kind of regset this is; see enum regset_type
   (declared elsewhere in this file).  */
46 enum regset_type type
;
/* Converts regcache contents into this regset's ptrace buffer
   format (see regset_fill_func above).  */
47 regset_fill_func fill_function
;
/* Converts this regset's ptrace buffer format back into regcache
   contents (see regset_store_func above).  */
48 regset_store_func store_function
;
51 /* Aggregation of all the supported regsets of a given
56 /* The regsets array. */
57 struct regset_info
*regsets
;
59 /* The number of regsets in the REGSETS array. */
62 /* If we get EIO on a regset, do not try it again. Note the set of
63 supported regsets may depend on processor mode on biarch
64 machines. This is a (lazily allocated) array holding one boolean
65 byte (0/1) per regset, with each element corresponding to the
66 regset in the REGSETS array above at the same offset. */
67 char *disabled_regsets
;
72 /* Mapping between the general-purpose registers in `struct user'
73 format and GDB's register array layout. */
77 /* The number of registers accessible. */
80 /* The registers map. */
84 /* All info needed to access an architecture/mode's registers. */
88 /* Regset support bitmap: 1 for registers that are transferred as a part
89 of a regset, 0 for ones that need to be handled individually. This
90 can be NULL if all registers are transferred with regsets or regsets
92 unsigned char *regset_bitmap
;
94 /* Info used when accessing registers with PTRACE_PEEKUSER /
95 PTRACE_POKEUSER. This can be NULL if all registers are
96 transferred with regsets.  */
97 struct usrregs_info
*usrregs
;
99 #ifdef HAVE_LINUX_REGSETS
100 /* Info used when accessing registers with regsets. */
101 struct regsets_info
*regsets_info
;
105 struct process_info_private
107 /* Arch-specific additions. */
108 struct arch_process_info
*arch_private
;
110 /* libthread_db-specific additions. Not NULL if this process has loaded
111 thread_db, and it is active. */
112 struct thread_db
*thread_db
;
114 /* &_r_debug. 0 if not yet determined. -1 if no PT_DYNAMIC in Phdrs. */
117 /* This flag is true iff we've just created or attached to the first
118 LWP of this process but it has not stopped yet. As soon as it
119 does, we need to call the low target's arch_setup callback. */
125 struct linux_target_ops
127 /* Architecture-specific setup. */
128 void (*arch_setup
) (void);
130 const struct regs_info
*(*regs_info
) (void);
131 int (*cannot_fetch_register
) (int);
133 /* Returns 0 if we can store the register, 1 if we can not
134 store the register, and 2 if failure to store the register
136 int (*cannot_store_register
) (int);
138 /* Hook to fetch a register in some non-standard way. Used for
139 example by backends that have read-only registers with hardcoded
140 values (e.g., IA64's gr0/fr0/fr1). Returns true if register
141 REGNO was supplied, false if not, and we should fallback to the
142 standard ptrace methods. */
143 int (*fetch_register
) (struct regcache
*regcache
, int regno
);
145 CORE_ADDR (*get_pc
) (struct regcache
*regcache
);
146 void (*set_pc
) (struct regcache
*regcache
, CORE_ADDR newpc
);
147 const unsigned char *breakpoint
;
149 CORE_ADDR (*breakpoint_reinsert_addr
) (void);
151 int decr_pc_after_break
;
152 int (*breakpoint_at
) (CORE_ADDR pc
);
154 /* Breakpoint and watchpoint related functions. See target.h for
156 int (*insert_point
) (char type
, CORE_ADDR addr
, int len
);
157 int (*remove_point
) (char type
, CORE_ADDR addr
, int len
);
158 int (*stopped_by_watchpoint
) (void);
159 CORE_ADDR (*stopped_data_address
) (void);
161 /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
162 for registers smaller than an xfer unit). */
163 void (*collect_ptrace_register
) (struct regcache
*regcache
,
164 int regno
, char *buf
);
165 void (*supply_ptrace_register
) (struct regcache
*regcache
,
166 int regno
, const char *buf
);
168 /* Hook to convert from target format to ptrace format and back.
169 Returns true if any conversion was done; false otherwise.
170 If DIRECTION is 1, then copy from INF to NATIVE.
171 If DIRECTION is 0, copy from NATIVE to INF. */
172 int (*siginfo_fixup
) (siginfo_t
*native
, void *inf
, int direction
);
174 /* Hook to call when a new process is created or attached to.
175 If extra per-process architecture-specific data is needed,
177 struct arch_process_info
* (*new_process
) (void);
179 /* Hook to call when a new thread is detected.
180 If extra per-thread architecture-specific data is needed,
182 struct arch_lwp_info
* (*new_thread
) (void);
184 /* Hook to call prior to resuming a thread. */
185 void (*prepare_to_resume
) (struct lwp_info
*);
187 /* Hook to support target specific qSupported. */
188 void (*process_qsupported
) (const char *);
190 /* Returns true if the low target supports tracepoints. */
191 int (*supports_tracepoints
) (void);
193 /* Fill ADDRP with the thread area address of LWPID. Returns 0 on
194 success, -1 on failure. */
195 int (*get_thread_area
) (int lwpid
, CORE_ADDR
*addrp
);
197 /* Install a fast tracepoint jump pad. See target.h for
199 int (*install_fast_tracepoint_jump_pad
) (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
203 CORE_ADDR
*jump_entry
,
204 CORE_ADDR
*trampoline
,
205 ULONGEST
*trampoline_size
,
206 unsigned char *jjump_pad_insn
,
207 ULONGEST
*jjump_pad_insn_size
,
208 CORE_ADDR
*adjusted_insn_addr
,
209 CORE_ADDR
*adjusted_insn_addr_end
,
212 /* Return the bytecode operations vector for the current inferior.
213 Returns NULL if bytecode compilation is not supported. */
214 struct emit_ops
*(*emit_ops
) (void);
216 /* Return the minimum length of an instruction that can be safely overwritten
217 for use as a fast tracepoint. */
218 int (*get_min_fast_tracepoint_insn_len
) (void);
220 /* Returns true if the low target supports range stepping. */
221 int (*supports_range_stepping
) (void);
/* The linux_target_ops callback vector in effect for this build.
   Declared `extern' here and defined elsewhere -- presumably once per
   architecture port; confirm against the arch's linux-*-low.c.  */
224 extern struct linux_target_ops the_low_target
;
/* Retrieve the lwp_info recorded in a thread_info's target_data
   slot (inferior_target_data returns it untyped, hence the cast).  */
226 #define get_thread_lwp(thr) ((struct lwp_info *) (inferior_target_data (thr)))
/* Follow an lwp_info's backlink to its owning thread_info.  */
227 #define get_lwp_thread(lwp) ((lwp)->thread)
229 /* This struct is recorded in the target_data field of struct thread_info.
231 On linux ``all_threads'' is keyed by the LWP ID, which we use as the
232 GDB protocol representation of the thread ID. Threads also have
233 a "process ID" (poorly named) which is (presently) the same as the
236 There is also ``all_processes'', which is keyed by the "overall process ID",
237 which GNU/Linux calls tgid, "thread group ID". */
241 /* Backlink to the parent object. */
242 struct thread_info
*thread
;
244 /* If this flag is set, the next SIGSTOP will be ignored (the
245 process will be immediately resumed). This means that either we
246 sent the SIGSTOP to it ourselves and got some other pending event
247 (so the SIGSTOP is still pending), or that we stopped the
248 inferior implicitly via PTRACE_ATTACH and have not waited for it
252 /* When this is true, we shall not try to resume this thread, even
253 if last_resume_kind isn't resume_stop. */
256 /* If this flag is set, the lwp is known to be stopped right now (stop
257 event already received in a wait()). */
260 /* If this flag is set, the lwp is known to be dead already (exit
261 event already received in a wait(), and is cached in
265 /* When stopped is set, the last wait status recorded for this lwp. */
268 /* When stopped is set, this is where the lwp stopped, with
269 decr_pc_after_break already accounted for. */
272 /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
274 int status_pending_p
;
277 /* STOPPED_BY_WATCHPOINT is non-zero if this LWP stopped with a data
279 int stopped_by_watchpoint
;
281 /* On architectures where it is possible to know the data address of
282 a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
283 contains such data address. Only valid if STOPPED_BY_WATCHPOINT
285 CORE_ADDR stopped_data_address
;
287 /* If this is non-zero, it is a breakpoint to be reinserted at our next
288 stop (SIGTRAP stops only). */
289 CORE_ADDR bp_reinsert
;
291 /* If this flag is set, the last continue operation at the ptrace
292 level on this process was a single-step. */
295 /* Range to single step within. This is a copy of the step range
296 passed along the last resume request. See 'struct
298 CORE_ADDR step_range_start
; /* Inclusive */
299 CORE_ADDR step_range_end
; /* Exclusive */
301 /* If this flag is set, we need to set the event request flags the
302 next time we see this LWP stop. */
303 int must_set_ptrace_flags
;
305 /* If this is non-zero, it points to a chain of signals which need to
306 be delivered to this process. */
307 struct pending_signals
*pending_signals
;
309 /* A link used when resuming. It is initialized from the resume request,
310 and then processed and cleared in linux_resume_one_lwp. */
311 struct thread_resume
*resume
;
313 /* True if it is known that this lwp is presently collecting a fast
314 tracepoint (it is in the jump pad or in some code that will
315 return to the jump pad).  Normally, we won't care about this, but
316 we will if a signal arrives to this lwp while it is
318 int collecting_fast_tracepoint
;
320 /* If this is non-zero, it points to a chain of signals which need
321 to be reported to GDB. These were deferred because the thread
322 was doing a fast tracepoint collect when they arrived. */
323 struct pending_signals
*pending_signals_to_report
;
325 /* When collecting_fast_tracepoint is first found to be 1, we insert
326 an exit-jump-pad-quickly breakpoint. This is it. */
327 struct breakpoint
*exit_jump_pad_bkpt
;
329 /* True if the LWP was seen stop at an internal breakpoint and needs
330 stepping over later when it is resumed. */
335 /* The thread handle, used for e.g. TLS access. Only valid if
336 THREAD_KNOWN is set. */
340 /* Arch-specific additions. */
341 struct arch_lwp_info
*arch_private
;
/* Reports whether PID's executable is a 64-bit ELF file, and stores
   the ELF machine number in *MACHINE -- inferred from the name;
   confirm the exact return convention against the definition.  */
344 int linux_pid_exe_is_elf_64_file (int pid
, unsigned int *machine
);
/* Attach to the LWP whose id is PID.  */
346 void linux_attach_lwp (unsigned long pid
);
/* Look up the lwp_info matching PTID.  NOTE(review): behavior on no
   match (NULL vs. assert) is not visible here -- check the
   definition before relying on it.  */
347 struct lwp_info
*find_lwp_pid (ptid_t ptid
);
/* Request that LWP stop -- presumably by sending it a SIGSTOP;
   confirm against linux-low.c.  */
348 void linux_stop_lwp (struct lwp_info
*lwp
);
350 #ifdef HAVE_LINUX_REGSETS
351 void initialize_regsets_info (struct regsets_info
*regsets_info
);
354 void initialize_low_arch (void);
356 /* From thread-db.c */
/* Initialize libthread_db support for the current process.
   NOTE(review): meaning of USE_EVENTS and of the int result are not
   visible here -- confirm against thread-db.c.  */
357 int thread_db_init (int use_events
);
/* Tear down thread_db state when detaching from a process.  */
358 void thread_db_detach (struct process_info
*);
/* Tear down thread_db state when a process dies.  */
359 void thread_db_mourn (struct process_info
*);
/* Handle a thread_db-related "monitor" command; returns nonzero if
   the command was recognized -- presumably; verify in thread-db.c.  */
360 int thread_db_handle_monitor_command (char *);
/* Compute the address of THREAD's thread-local storage block for the
   module loaded at LOAD_MODULE, at OFFSET within it, storing the
   result in *ADDRESS.  Returns 0 on success -- presumably a
   thread_db error code otherwise; confirm.  */
361 int thread_db_get_tls_address (struct thread_info
*thread
, CORE_ADDR offset
,
362 CORE_ADDR load_module
, CORE_ADDR
*address
);
/* Look up symbol NAME via libthread_db, storing its address in
   *ADDRP; the int result presumably signals found/not-found --
   confirm against thread-db.c.  */
363 int thread_db_look_up_one_symbol (const char *name
, CORE_ADDR
*addrp
);