/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
18
19 #include "nat/gdb_thread_db.h"
20 #include <signal.h>
21
22 #include "gdbthread.h"
23 #include "gdb_proc_service.h"
24
25 /* Included for ptrace type definitions. */
26 #include "nat/linux-ptrace.h"
27
28 #define PTRACE_XFER_TYPE long
29
30 #ifdef HAVE_LINUX_REGSETS
/* Function pointer types for moving register contents between the
   regcache and a native regset buffer.  A "fill" function writes the
   native buffer from the regcache (note its buffer argument is
   non-const); a "store" function reads the native buffer (const) and
   updates the regcache.  */
typedef void (*regset_fill_func) (struct regcache *, void *);
typedef void (*regset_store_func) (struct regcache *, const void *);

/* The kind of registers a regset contains.  */
enum regset_type {
  GENERAL_REGS,
  FP_REGS,
  EXTENDED_REGS,
};
38
/* Describes a single ptrace regset: the ptrace request numbers used
   to read and write it, its size, its kind, and the functions that
   convert between the native buffer layout and the regcache.  */

struct regset_info
{
  /* The ptrace requests used to fetch (GET_REQUEST) and store
     (SET_REQUEST) this regset.  */
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *".  */
  int nt_type;
  /* The size of the regset buffer, in bytes.  */
  int size;
  /* What kind of registers this regset holds.  */
  enum regset_type type;
  /* Conversion functions between this regset's native layout and the
     regcache (see regset_fill_func/regset_store_func).  */
  regset_fill_func fill_function;
  regset_store_func store_function;
};
50
/* Aggregation of all the supported regsets of a given
   architecture/mode.  */

struct regsets_info
{
  /* The regsets array.  */
  struct regset_info *regsets;

  /* The number of regsets in the REGSETS array.  */
  int num_regsets;

  /* If we get EIO on a regset, do not try it again.  Note the set of
     supported regsets may depend on processor mode on biarch
     machines.  This is a (lazily allocated) array holding one boolean
     byte (0/1) per regset, with each element corresponding to the
     regset in the REGSETS array above at the same offset.  */
  char *disabled_regsets;
};
69
70 #endif
71
/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */

struct usrregs_info
{
  /* The number of registers accessible.  */
  int num_regs;

  /* The registers map: per-GDB-register entries used when accessing
     registers with PTRACE_PEEKUSER/PTRACE_POKEUSER.  NOTE(review):
     the exact encoding of each entry is defined by the users in
     linux-low.c -- confirm there.  */
  int *regmap;
};
83
/* All info needed to access an architecture/mode's registers.  */

struct regs_info
{
  /* Regset support bitmap: 1 for registers that are transferred as a part
     of a regset, 0 for ones that need to be handled individually.  This
     can be NULL if all registers are transferred with regsets or regsets
     are not supported.  */
  unsigned char *regset_bitmap;

  /* Info used when accessing registers with PTRACE_PEEKUSER /
     PTRACE_POKEUSER.  This can be NULL if all registers are
     transferred with regsets.  */
  struct usrregs_info *usrregs;

#ifdef HAVE_LINUX_REGSETS
  /* Info used when accessing registers with regsets.  */
  struct regsets_info *regsets_info;
#endif
};
104
/* Linux-specific, per-process private data, hung off of struct
   process_info.  */

struct process_info_private
{
  /* Arch-specific additions.  */
  struct arch_process_info *arch_private;

  /* libthread_db-specific additions.  Not NULL if this process has loaded
     thread_db, and it is active.  */
  struct thread_db *thread_db;

  /* &_r_debug.  0 if not yet determined.  -1 if no PT_DYNAMIC in Phdrs.  */
  CORE_ADDR r_debug;

  /* This flag is true iff we've just created or attached to the first
     LWP of this process but it has not stopped yet.  As soon as it
     does, we need to call the low target's arch_setup callback.  */
  int new_inferior;
};
122
struct lwp_info;

/* The architecture-specific "low target" vector.  Each Linux port
   (linux-<arch>-low.c) provides one of these as `the_low_target'.  */

struct linux_target_ops
{
  /* Architecture-specific setup.  */
  void (*arch_setup) (void);

  /* Return the description of the registers of the current
     inferior's architecture/mode.  */
  const struct regs_info *(*regs_info) (void);

  /* Returns nonzero if register REGNO cannot be fetched.  */
  int (*cannot_fetch_register) (int);

  /* Returns 0 if we can store the register, 1 if we can not
     store the register, and 2 if failure to store the register
     is acceptable.  */
  int (*cannot_store_register) (int);

  /* Hook to fetch a register in some non-standard way.  Used for
     example by backends that have read-only registers with hardcoded
     values (e.g., IA64's gr0/fr0/fr1).  Returns true if register
     REGNO was supplied, false if not, and we should fallback to the
     standard ptrace methods.  */
  int (*fetch_register) (struct regcache *regcache, int regno);

  /* Read/write the program counter from/to REGCACHE.  */
  CORE_ADDR (*get_pc) (struct regcache *regcache);
  void (*set_pc) (struct regcache *regcache, CORE_ADDR newpc);

  /* The software breakpoint instruction for this architecture, and
     its length in bytes.  */
  const unsigned char *breakpoint;
  int breakpoint_len;

  /* NOTE(review): presumably returns the address at which to
     reinsert a breakpoint after stepping -- confirm against the
     ports' implementations in linux-<arch>-low.c.  */
  CORE_ADDR (*breakpoint_reinsert_addr) (void);

  /* Amount to decrement the PC by after a software breakpoint trap.  */
  int decr_pc_after_break;

  /* Returns nonzero if a breakpoint instruction is present at PC.  */
  int (*breakpoint_at) (CORE_ADDR pc);

  /* Breakpoint and watchpoint related functions.  See target.h for
     comments.  */
  int (*supports_z_point_type) (char z_type);
  int (*insert_point) (enum raw_bkpt_type type, CORE_ADDR addr,
		       int size, struct raw_breakpoint *bp);
  int (*remove_point) (enum raw_bkpt_type type, CORE_ADDR addr,
		       int size, struct raw_breakpoint *bp);

  int (*stopped_by_watchpoint) (void);
  CORE_ADDR (*stopped_data_address) (void);

  /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
     for registers smaller than an xfer unit).  */
  void (*collect_ptrace_register) (struct regcache *regcache,
				   int regno, char *buf);
  void (*supply_ptrace_register) (struct regcache *regcache,
				  int regno, const char *buf);

  /* Hook to convert from target format to ptrace format and back.
     Returns true if any conversion was done; false otherwise.
     If DIRECTION is 1, then copy from INF to NATIVE.
     If DIRECTION is 0, copy from NATIVE to INF.  */
  int (*siginfo_fixup) (siginfo_t *native, void *inf, int direction);

  /* Hook to call when a new process is created or attached to.
     If extra per-process architecture-specific data is needed,
     allocate it here.  */
  struct arch_process_info * (*new_process) (void);

  /* Hook to call when a new thread is detected.
     If extra per-thread architecture-specific data is needed,
     allocate it here.  */
  struct arch_lwp_info * (*new_thread) (void);

  /* Hook to call prior to resuming a thread.  */
  void (*prepare_to_resume) (struct lwp_info *);

  /* Hook to support target specific qSupported.  */
  void (*process_qsupported) (const char *);

  /* Returns true if the low target supports tracepoints.  */
  int (*supports_tracepoints) (void);

  /* Fill ADDRP with the thread area address of LWPID.  Returns 0 on
     success, -1 on failure.  */
  int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);

  /* Install a fast tracepoint jump pad.  See target.h for
     comments.  */
  int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					   CORE_ADDR collector,
					   CORE_ADDR lockaddr,
					   ULONGEST orig_size,
					   CORE_ADDR *jump_entry,
					   CORE_ADDR *trampoline,
					   ULONGEST *trampoline_size,
					   unsigned char *jjump_pad_insn,
					   ULONGEST *jjump_pad_insn_size,
					   CORE_ADDR *adjusted_insn_addr,
					   CORE_ADDR *adjusted_insn_addr_end,
					   char *err);

  /* Return the bytecode operations vector for the current inferior.
     Returns NULL if bytecode compilation is not supported.  */
  struct emit_ops *(*emit_ops) (void);

  /* Return the minimum length of an instruction that can be safely
     overwritten for use as a fast tracepoint.  */
  int (*get_min_fast_tracepoint_insn_len) (void);

  /* Returns true if the low target supports range stepping.  */
  int (*supports_range_stepping) (void);
};
227
/* The one low-target vector, defined by each port's
   linux-<arch>-low.c.  */
extern struct linux_target_ops the_low_target;

/* Accessors linking the generic thread_info and the Linux-specific
   lwp_info: a thread's target_data points at its lwp, and the lwp
   holds a backlink to its thread.  */
#define get_thread_lwp(thr) ((struct lwp_info *) (inferior_target_data (thr)))
#define get_lwp_thread(lwp) ((lwp)->thread)
232
/* Reasons an LWP last stopped.  */

enum lwp_stop_reason
{
  /* Either not stopped, or stopped for a reason that doesn't require
     special tracking.  */
  LWP_STOPPED_BY_NO_REASON,

  /* Stopped by a software breakpoint.  */
  LWP_STOPPED_BY_SW_BREAKPOINT,

  /* Stopped by a hardware breakpoint.  */
  LWP_STOPPED_BY_HW_BREAKPOINT,

  /* Stopped by a watchpoint.  */
  LWP_STOPPED_BY_WATCHPOINT
};
250
/* This struct is recorded in the target_data field of struct thread_info.

   On linux ``all_threads'' is keyed by the LWP ID, which we use as the
   GDB protocol representation of the thread ID.  Threads also have
   a "process ID" (poorly named) which is (presently) the same as the
   LWP ID.

   There is also ``all_processes'', which is keyed by the "overall
   process ID", which GNU/Linux calls tgid, "thread group ID".  */

struct lwp_info
{
  /* Backlink to the parent object.  */
  struct thread_info *thread;

  /* If this flag is set, the next SIGSTOP will be ignored (the
     process will be immediately resumed).  This means that either we
     sent the SIGSTOP to it ourselves and got some other pending event
     (so the SIGSTOP is still pending), or that we stopped the
     inferior implicitly via PTRACE_ATTACH and have not waited for it
     yet.  */
  int stop_expected;

  /* When this is true, we shall not try to resume this thread, even
     if last_resume_kind isn't resume_stop.  */
  int suspended;

  /* If this flag is set, the lwp is known to be stopped right now (stop
     event already received in a wait()).  */
  int stopped;

  /* If this flag is set, the lwp is known to be dead already (exit
     event already received in a wait(), and is cached in
     status_pending).  */
  int dead;

  /* When stopped is set, the last wait status recorded for this lwp.  */
  int last_status;

  /* When stopped is set, this is where the lwp last stopped, with
     decr_pc_after_break already accounted for.  If the LWP is
     running, this is the address at which the lwp was resumed.  */
  CORE_ADDR stop_pc;

  /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
     been reported.  */
  int status_pending_p;
  int status_pending;

  /* The reason the LWP last stopped, if we need to track it
     (breakpoint, watchpoint, etc.)  */
  enum lwp_stop_reason stop_reason;

  /* On architectures where it is possible to know the data address of
     a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
     contains such data address.  Only valid if STOPPED_BY_WATCHPOINT
     is true.  */
  CORE_ADDR stopped_data_address;

  /* If this is non-zero, it is a breakpoint to be reinserted at our next
     stop (SIGTRAP stops only).  */
  CORE_ADDR bp_reinsert;

  /* If this flag is set, the last continue operation at the ptrace
     level on this process was a single-step.  */
  int stepping;

  /* Range to single step within.  This is a copy of the step range
     passed along the last resume request.  See 'struct
     thread_resume'.  */
  CORE_ADDR step_range_start;	/* Inclusive */
  CORE_ADDR step_range_end;	/* Exclusive */

  /* If this flag is set, we need to set the event request flags the
     next time we see this LWP stop.  */
  int must_set_ptrace_flags;

  /* If this is non-zero, it points to a chain of signals which need to
     be delivered to this process.  */
  struct pending_signals *pending_signals;

  /* A link used when resuming.  It is initialized from the resume request,
     and then processed and cleared in linux_resume_one_lwp.  */
  struct thread_resume *resume;

  /* True if it is known that this lwp is presently collecting a fast
     tracepoint (it is in the jump pad or in some code that will
     return to the jump pad).  Normally, we won't care about this, but
     we will if a signal arrives to this lwp while it is
     collecting.  */
  int collecting_fast_tracepoint;

  /* If this is non-zero, it points to a chain of signals which need
     to be reported to GDB.  These were deferred because the thread
     was doing a fast tracepoint collect when they arrived.  */
  struct pending_signals *pending_signals_to_report;

  /* When collecting_fast_tracepoint is first found to be 1, we insert
     an exit-jump-pad-quickly breakpoint.  This is it.  */
  struct breakpoint *exit_jump_pad_bkpt;

  /* True if the LWP was seen stopping at an internal breakpoint and needs
     stepping over later when it is resumed.  */
  int need_step_over;

#ifdef USE_THREAD_DB
  /* True if this LWP is known to libthread_db (TH below is valid).  */
  int thread_known;
  /* The thread handle, used for e.g. TLS access.  Only valid if
     THREAD_KNOWN is set.  */
  td_thrhandle_t th;
#endif

  /* Arch-specific additions.  */
  struct arch_lwp_info *arch_private;
};
366
/* Returns nonzero if the executable of process PID is a 64-bit ELF
   file, storing the ELF machine number in *MACHINE.  NOTE(review):
   exact return convention is defined in linux-low.c -- confirm
   there.  */
int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);

/* Attach to PTID.  Returns 0 on success, non-zero otherwise (an
   errno).  */
int linux_attach_lwp (ptid_t ptid);

/* Look up the lwp for PTID; presumably returns NULL when no such lwp
   exists -- confirm against linux-low.c.  */
struct lwp_info *find_lwp_pid (ptid_t ptid);

/* Request that LWP stop; the mechanism is defined in linux-low.c.  */
void linux_stop_lwp (struct lwp_info *lwp);

#ifdef HAVE_LINUX_REGSETS
/* Initialize REGSETS_INFO's bookkeeping (see linux-low.c).  */
void initialize_regsets_info (struct regsets_info *regsets_info);
#endif

/* Initialize the architecture-specific pieces of the low target.  */
void initialize_low_arch (void);

/* From thread-db.c  */
int thread_db_init (int use_events);
void thread_db_detach (struct process_info *);
void thread_db_mourn (struct process_info *);
int thread_db_handle_monitor_command (char *);
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
			       CORE_ADDR load_module, CORE_ADDR *address);
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);