1 /* Internal interfaces for the GNU/Linux specific target code for gdbserver.
2 Copyright (C) 2002-2014 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19 #include "gdb_thread_db.h"
22 #include "gdbthread.h"
23 #include "gdb_proc_service.h"
25 /* Included for ptrace type definitions. */
26 #include "linux-ptrace.h"
/* The unit of transfer for ptrace PEEK/POKE requests: one inferior
   "word", which on GNU/Linux is a long.  */
#define PTRACE_XFER_TYPE long
30 #ifdef HAVE_LINUX_REGSETS
/* Type of the function that fills a ptrace-layout regset buffer
   (second argument, written to) from the given regcache, prior to
   writing the regset to the inferior.  */
typedef void (*regset_fill_func) (struct regcache *, void *);

/* Type of the function that supplies a ptrace-layout regset buffer
   (second argument, read-only) just fetched from the inferior into
   the given regcache.  */
typedef void (*regset_store_func) (struct regcache *, const void *);
41 int get_request
, set_request
;
42 /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
43 argument and the 4th argument should be "const struct iovec *". */
46 enum regset_type type
;
47 regset_fill_func fill_function
;
48 regset_store_func store_function
;
51 /* Aggregation of all the supported regsets of a given
56 /* The regsets array. */
57 struct regset_info
*regsets
;
59 /* The number of regsets in the REGSETS array. */
62 /* If we get EIO on a regset, do not try it again. Note the set of
63 supported regsets may depend on processor mode on biarch
64 machines. This is a (lazily allocated) array holding one boolean
65 byte (0/1) per regset, with each element corresponding to the
66 regset in the REGSETS array above at the same offset. */
67 char *disabled_regsets
;
72 /* Mapping between the general-purpose registers in `struct user'
73 format and GDB's register array layout. */
77 /* The number of registers accessible. */
80 /* The registers map. */
84 /* All info needed to access an architecture/mode's registers. */
88 /* Regset support bitmap: 1 for registers that are transferred as a part
89 of a regset, 0 for ones that need to be handled individually. This
90 can be NULL if all registers are transferred with regsets or regsets
92 unsigned char *regset_bitmap
;
94 /* Info used when accessing registers with PTRACE_PEEKUSER /
95 PTRACE_POKEUSER. This can be NULL if all registers are
transferred with regsets.  */
97 struct usrregs_info
*usrregs
;
99 #ifdef HAVE_LINUX_REGSETS
100 /* Info used when accessing registers with regsets. */
101 struct regsets_info
*regsets_info
;
105 struct process_info_private
107 /* Arch-specific additions. */
108 struct arch_process_info
*arch_private
;
110 /* libthread_db-specific additions. Not NULL if this process has loaded
111 thread_db, and it is active. */
112 struct thread_db
*thread_db
;
114 /* &_r_debug. 0 if not yet determined. -1 if no PT_DYNAMIC in Phdrs. */
117 /* This flag is true iff we've just created or attached to the first
118 LWP of this process but it has not stopped yet. As soon as it
119 does, we need to call the low target's arch_setup callback. */
125 struct linux_target_ops
127 /* Architecture-specific setup. */
128 void (*arch_setup
) (void);
130 const struct regs_info
*(*regs_info
) (void);
131 int (*cannot_fetch_register
) (int);
133 /* Returns 0 if we can store the register, 1 if we can not
134 store the register, and 2 if failure to store the register
136 int (*cannot_store_register
) (int);
138 /* Hook to fetch a register in some non-standard way. Used for
139 example by backends that have read-only registers with hardcoded
140 values (e.g., IA64's gr0/fr0/fr1). Returns true if register
141 REGNO was supplied, false if not, and we should fallback to the
142 standard ptrace methods. */
143 int (*fetch_register
) (struct regcache
*regcache
, int regno
);
145 CORE_ADDR (*get_pc
) (struct regcache
*regcache
);
146 void (*set_pc
) (struct regcache
*regcache
, CORE_ADDR newpc
);
147 const unsigned char *breakpoint
;
149 CORE_ADDR (*breakpoint_reinsert_addr
) (void);
151 int decr_pc_after_break
;
152 int (*breakpoint_at
) (CORE_ADDR pc
);
154 /* Breakpoint and watchpoint related functions. See target.h for
156 int (*insert_point
) (char type
, CORE_ADDR addr
, int len
);
157 int (*remove_point
) (char type
, CORE_ADDR addr
, int len
);
158 int (*stopped_by_watchpoint
) (void);
159 CORE_ADDR (*stopped_data_address
) (void);
161 /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
162 for registers smaller than an xfer unit). */
163 void (*collect_ptrace_register
) (struct regcache
*regcache
,
164 int regno
, char *buf
);
165 void (*supply_ptrace_register
) (struct regcache
*regcache
,
166 int regno
, const char *buf
);
168 /* Hook to convert from target format to ptrace format and back.
169 Returns true if any conversion was done; false otherwise.
170 If DIRECTION is 1, then copy from INF to NATIVE.
171 If DIRECTION is 0, copy from NATIVE to INF. */
172 int (*siginfo_fixup
) (siginfo_t
*native
, void *inf
, int direction
);
174 /* Hook to call when a new process is created or attached to.
175 If extra per-process architecture-specific data is needed,
177 struct arch_process_info
* (*new_process
) (void);
179 /* Hook to call when a new thread is detected.
180 If extra per-thread architecture-specific data is needed,
182 struct arch_lwp_info
* (*new_thread
) (void);
184 /* Hook to call prior to resuming a thread. */
185 void (*prepare_to_resume
) (struct lwp_info
*);
187 /* Hook to support target specific qSupported. */
188 void (*process_qsupported
) (const char *);
190 /* Returns true if the low target supports tracepoints. */
191 int (*supports_tracepoints
) (void);
193 /* Fill ADDRP with the thread area address of LWPID. Returns 0 on
194 success, -1 on failure. */
195 int (*get_thread_area
) (int lwpid
, CORE_ADDR
*addrp
);
197 /* Install a fast tracepoint jump pad. See target.h for
199 int (*install_fast_tracepoint_jump_pad
) (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
203 CORE_ADDR
*jump_entry
,
204 CORE_ADDR
*trampoline
,
205 ULONGEST
*trampoline_size
,
206 unsigned char *jjump_pad_insn
,
207 ULONGEST
*jjump_pad_insn_size
,
208 CORE_ADDR
*adjusted_insn_addr
,
209 CORE_ADDR
*adjusted_insn_addr_end
,
212 /* Return the bytecode operations vector for the current inferior.
213 Returns NULL if bytecode compilation is not supported. */
214 struct emit_ops
*(*emit_ops
) (void);
216 /* Return the minimum length of an instruction that can be safely overwritten
217 for use as a fast tracepoint. */
218 int (*get_min_fast_tracepoint_insn_len
) (void);
220 /* Returns true if the low target supports range stepping. */
221 int (*supports_range_stepping
) (void);
/* The single low-target hook table used by the generic Linux code;
   presumably defined once per architecture backend — confirm in the
   corresponding linux-*-low.c.  */
extern struct linux_target_ops the_low_target;
/* Accessors for the ptid, pid, and lwp id of an inferior list entry:
   PROC may be any object whose first member is a
   "struct inferior_list_entry head".  */
#define ptid_of(proc) ((proc)->head.id)
#define pid_of(proc) ptid_get_pid ((proc)->head.id)
#define lwpid_of(proc) ptid_get_lwp ((proc)->head.id)
/* Cast an inferior list entry back to the lwp_info that embeds it.  */
#define get_lwp(inf) ((struct lwp_info *)(inf))

/* Return the lwp_info associated with thread THR (stored as the
   thread's target data).  */
#define get_thread_lwp(thr) (get_lwp (inferior_target_data (thr)))

/* Return the thread_info whose id matches PROC's lwp id, looked up
   in the global all_threads list.  */
#define get_lwp_thread(proc) ((struct thread_info *) \
			      find_inferior_id (&all_threads, \
						get_lwp (proc)->head.id))
238 struct inferior_list_entry head
;
240 /* If this flag is set, the next SIGSTOP will be ignored (the
241 process will be immediately resumed). This means that either we
242 sent the SIGSTOP to it ourselves and got some other pending event
243 (so the SIGSTOP is still pending), or that we stopped the
244 inferior implicitly via PTRACE_ATTACH and have not waited for it
248 /* When this is true, we shall not try to resume this thread, even
249 if last_resume_kind isn't resume_stop. */
252 /* If this flag is set, the lwp is known to be stopped right now (stop
253 event already received in a wait()). */
256 /* If this flag is set, the lwp is known to be dead already (exit
257 event already received in a wait(), and is cached in
261 /* When stopped is set, the last wait status recorded for this lwp. */
264 /* When stopped is set, this is where the lwp stopped, with
265 decr_pc_after_break already accounted for. */
268 /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
270 int status_pending_p
;
273 /* STOPPED_BY_WATCHPOINT is non-zero if this LWP stopped with a data
275 int stopped_by_watchpoint
;
277 /* On architectures where it is possible to know the data address of
278 a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
279 contains such data address. Only valid if STOPPED_BY_WATCHPOINT
281 CORE_ADDR stopped_data_address
;
283 /* If this is non-zero, it is a breakpoint to be reinserted at our next
284 stop (SIGTRAP stops only). */
285 CORE_ADDR bp_reinsert
;
287 /* If this flag is set, the last continue operation at the ptrace
288 level on this process was a single-step. */
291 /* Range to single step within. This is a copy of the step range
292 passed along the last resume request. See 'struct
294 CORE_ADDR step_range_start
; /* Inclusive */
295 CORE_ADDR step_range_end
; /* Exclusive */
297 /* If this flag is set, we need to set the event request flags the
298 next time we see this LWP stop. */
299 int must_set_ptrace_flags
;
301 /* If this is non-zero, it points to a chain of signals which need to
302 be delivered to this process. */
303 struct pending_signals
*pending_signals
;
305 /* A link used when resuming. It is initialized from the resume request,
306 and then processed and cleared in linux_resume_one_lwp. */
307 struct thread_resume
*resume
;
309 /* True if it is known that this lwp is presently collecting a fast
310 tracepoint (it is in the jump pad or in some code that will
311 return to the jump pad. Normally, we won't care about this, but
312 we will if a signal arrives to this lwp while it is
314 int collecting_fast_tracepoint
;
316 /* If this is non-zero, it points to a chain of signals which need
317 to be reported to GDB. These were deferred because the thread
318 was doing a fast tracepoint collect when they arrived. */
319 struct pending_signals
*pending_signals_to_report
;
321 /* When collecting_fast_tracepoint is first found to be 1, we insert
an exit-jump-pad-quickly breakpoint.  This is it.  */
323 struct breakpoint
*exit_jump_pad_bkpt
;
325 /* True if the LWP was seen stop at an internal breakpoint and needs
326 stepping over later when it is resumed. */
331 /* The thread handle, used for e.g. TLS access. Only valid if
332 THREAD_KNOWN is set. */
336 /* Arch-specific additions. */
337 struct arch_lwp_info
*arch_private
;
/* The list of all LWPs currently known to gdbserver.  */
extern struct inferior_list all_lwps;
/* Return nonzero iff the executable of process PID is a 64-bit ELF
   file, and store its ELF machine number in *MACHINE.
   NOTE(review): exact return convention on error is not visible
   here — confirm in linux-low.c.  */
int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);

/* Attach to the LWP whose id is PID.  */
void linux_attach_lwp (unsigned long pid);

/* Look up the lwp_info for PTID (NULL if not found — verify in
   linux-low.c).  */
struct lwp_info *find_lwp_pid (ptid_t ptid);

/* Request that LWP stop.  */
void linux_stop_lwp (struct lwp_info *lwp);
348 #ifdef HAVE_LINUX_REGSETS
/* Initialize REGSETS_INFO before first use.  */
void initialize_regsets_info (struct regsets_info *regsets_info);
/* Perform one-time, architecture-specific low-target initialization.  */
void initialize_low_arch (void);
/* From thread-db.c */

/* Activate libthread_db support for the current process.  USE_EVENTS
   controls whether thread event reporting is requested.  Returns
   nonzero on success — confirm exact convention in thread-db.c.  */
int thread_db_init (int use_events);

/* Tear down thread_db state when detaching from the process.  */
void thread_db_detach (struct process_info *);

/* Tear down thread_db state when the process goes away.  */
void thread_db_mourn (struct process_info *);

/* Handle a thread_db-related monitor command; returns nonzero if the
   command was recognized and handled (verify in thread-db.c).  */
int thread_db_handle_monitor_command (char *);

/* Compute the address of a thread-local variable for THREAD: the
   variable at OFFSET within the module loaded at LOAD_MODULE.  The
   resulting address is stored in *ADDRESS.  */
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
			       CORE_ADDR load_module, CORE_ADDR *address);

/* Look up the symbol NAME via libthread_db and store its address in
   *ADDRP.  */
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);