musl: Move W_STOPCODE to common/gdb_wait.h.
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
32d0add0 2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
58b4daa5 22#include "agent.h"
de0d863e 23#include "tdesc.h"
b20a6524 24#include "rsp-low.h"
da6d8c04 25
96d7229d
LM
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
8bdce1ff 28#include "gdb_wait.h"
5826e159 29#include "nat/gdb_ptrace.h"
125f8a3d
GB
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
8cc73a39 32#include "nat/linux-personality.h"
da6d8c04
DJ
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
0a30fbc4 36#include <unistd.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
602e3198 46#include "filestuff.h"
c144c7a0 47#include "tracepoint.h"
533b0600 48#include "hostio.h"
957f3f49
DE
49#ifndef ELFMAG0
50/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
51 then ELFMAG0 will have been defined. If it didn't get included by
52 gdb_proc_service.h then including it will likely introduce a duplicate
53 definition of elf_fpregset_t. */
54#include <elf.h>
55#endif
14d2069a 56#include "nat/linux-namespaces.h"
efcbbd14
UW
57
58#ifndef SPUFS_MAGIC
59#define SPUFS_MAGIC 0x23c9b64e
60#endif
da6d8c04 61
03583c20
UW
62#ifdef HAVE_PERSONALITY
63# include <sys/personality.h>
64# if !HAVE_DECL_ADDR_NO_RANDOMIZE
65# define ADDR_NO_RANDOMIZE 0x0040000
66# endif
67#endif
68
fd462a61
DJ
69#ifndef O_LARGEFILE
70#define O_LARGEFILE 0
71#endif
72
1a981360
PA
73/* This is the kernel's hard limit. Not to be confused with
74 SIGRTMIN. */
75#ifndef __SIGRTMIN
76#define __SIGRTMIN 32
77#endif
78
db0dfaa0
LM
79/* Some targets did not define these ptrace constants from the start,
80 so gdbserver defines them locally here. In the future, these may
81 be removed after they are added to asm/ptrace.h. */
82#if !(defined(PT_TEXT_ADDR) \
83 || defined(PT_DATA_ADDR) \
84 || defined(PT_TEXT_END_ADDR))
85#if defined(__mcoldfire__)
86/* These are still undefined in 3.10 kernels. */
87#define PT_TEXT_ADDR 49*4
88#define PT_DATA_ADDR 50*4
89#define PT_TEXT_END_ADDR 51*4
90/* BFIN already defines these since at least 2.6.32 kernels. */
91#elif defined(BFIN)
92#define PT_TEXT_ADDR 220
93#define PT_TEXT_END_ADDR 224
94#define PT_DATA_ADDR 228
95/* These are still undefined in 3.10 kernels. */
96#elif defined(__TMS320C6X__)
97#define PT_TEXT_ADDR (0x10000*4)
98#define PT_DATA_ADDR (0x10004*4)
99#define PT_TEXT_END_ADDR (0x10008*4)
100#endif
101#endif
102
9accd112 103#ifdef HAVE_LINUX_BTRACE
125f8a3d 104# include "nat/linux-btrace.h"
734b0e4b 105# include "btrace-common.h"
9accd112
MM
106#endif
107
8365dcf5
TJB
108#ifndef HAVE_ELF32_AUXV_T
109/* Copied from glibc's elf.h. */
110typedef struct
111{
112 uint32_t a_type; /* Entry type */
113 union
114 {
115 uint32_t a_val; /* Integer value */
116 /* We use to have pointer elements added here. We cannot do that,
117 though, since it does not work when using 32-bit definitions
118 on 64-bit platforms and vice versa. */
119 } a_un;
120} Elf32_auxv_t;
121#endif
122
123#ifndef HAVE_ELF64_AUXV_T
124/* Copied from glibc's elf.h. */
125typedef struct
126{
127 uint64_t a_type; /* Entry type */
128 union
129 {
130 uint64_t a_val; /* Integer value */
131 /* We use to have pointer elements added here. We cannot do that,
132 though, since it does not work when using 32-bit definitions
133 on 64-bit platforms and vice versa. */
134 } a_un;
135} Elf64_auxv_t;
136#endif
137
ded48a5e
YQ
138/* Does the current host support PTRACE_GETREGSET? */
139int have_ptrace_getregset = -1;
140
cff068da
GB
141/* LWP accessors. */
142
143/* See nat/linux-nat.h. */
144
145ptid_t
146ptid_of_lwp (struct lwp_info *lwp)
147{
148 return ptid_of (get_lwp_thread (lwp));
149}
150
151/* See nat/linux-nat.h. */
152
4b134ca1
GB
void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  /* Record the backend-owned per-LWP data pointer; ownership of INFO
     stays with the architecture backend.  */
  lwp->arch_private = info;
}
159
160/* See nat/linux-nat.h. */
161
struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  /* Return the backend-owned per-LWP data previously stored with
     lwp_set_arch_private_info (may be NULL).  */
  return lwp->arch_private;
}
167
168/* See nat/linux-nat.h. */
169
cff068da
GB
int
lwp_is_stopped (struct lwp_info *lwp)
{
  /* Non-zero when we believe this LWP is in a ptrace stop.  */
  return lwp->stopped;
}
175
176/* See nat/linux-nat.h. */
177
enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  /* Why the LWP last stopped (breakpoint, watchpoint, single-step,
     etc.), as recorded when the stop was processed.  */
  return lwp->stop_reason;
}
183
05044653
PA
184/* A list of all unknown processes which receive stop signals. Some
185 other process will presumably claim each of these as forked
186 children momentarily. */
24a09b5f 187
05044653
PA
struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list of stop notifications not yet claimed by a
   parent (see the comment above).  */
struct simple_pid_list *stopped_pids;
200
201/* Trivial list manipulation functions to keep track of a list of new
202 stopped processes. */
203
204static void
205add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
206{
8d749320 207 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
208
209 new_pid->pid = pid;
210 new_pid->status = status;
211 new_pid->next = *listp;
212 *listp = new_pid;
213}
214
215static int
216pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
217{
218 struct simple_pid_list **p;
219
220 for (p = listp; *p != NULL; p = &(*p)->next)
221 if ((*p)->pid == pid)
222 {
223 struct simple_pid_list *next = (*p)->next;
224
225 *statusp = (*p)->status;
226 xfree (*p);
227 *p = next;
228 return 1;
229 }
230 return 0;
231}
24a09b5f 232
bde24c0a
PA
/* The kinds of all-stop operations that may be in progress.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
247
248/* FIXME make into a target method? */
24a09b5f 249int using_threads = 1;
24a09b5f 250
fa593d66
PA
251/* True if we're presently stabilizing threads (moving them out of
252 jump pads). */
253static int stabilizing_threads;
254
2acc282a 255static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 256 int step, int signal, siginfo_t *info);
2bd7c093 257static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
258static void stop_all_lwps (int suspend, struct lwp_info *except);
259static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
fa96cb38
PA
260static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
261 int *wstat, int options);
95954743 262static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
b3312d80 263static struct lwp_info *add_lwp (ptid_t ptid);
94585166 264static void linux_mourn (struct process_info *process);
c35fafde 265static int linux_stopped_by_watchpoint (void);
95954743 266static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 267static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 268static void proceed_all_lwps (void);
d50171e4 269static int finish_step_over (struct lwp_info *lwp);
d50171e4 270static int kill_lwp (unsigned long lwpid, int signo);
863d01bd
PA
271static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
272static void complete_ongoing_step_over (void);
d50171e4 273
582511be
PA
274/* When the event-loop is doing a step-over, this points at the thread
275 being stepped. */
276ptid_t step_over_bkpt;
277
d50171e4
PA
278/* True if the low target can hardware single-step. Such targets
279 don't need a BREAKPOINT_REINSERT_ADDR callback. */
280
static int
can_hardware_single_step (void)
{
  /* A target that supplies a software single-step (reinsert) address
     callback cannot hardware single-step; absence of the callback
     implies hardware single-step support.  */
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}
286
287/* True if the low target supports memory breakpoints. If so, we'll
288 have a GET_PC implementation. */
289
static int
supports_breakpoints (void)
{
  /* Memory-breakpoint support goes hand in hand with the ability to
     read the PC (see the comment above).  */
  return (the_low_target.get_pc != NULL);
}
0d62e5e8 295
fa593d66
PA
296/* Returns true if this target can support fast tracepoints. This
297 does not mean that the in-process agent has been loaded in the
298 inferior. */
299
static int
supports_fast_tracepoints (void)
{
  /* The target must know how to install a jump pad; whether the
     in-process agent is actually loaded is a separate question.  */
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}
305
c2d6af84
PA
306/* True if LWP is stopped in its stepping range. */
307
308static int
309lwp_in_step_range (struct lwp_info *lwp)
310{
311 CORE_ADDR pc = lwp->stop_pc;
312
313 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
314}
315
0d62e5e8
DJ
/* A signal reported by waitpid but not yet delivered to the
   inferior.  NOTE(review): appears to form a linked list via PREV;
   confirm against the users of this structure elsewhere in the
   file.  */
struct pending_signals
{
  /* The signal number.  */
  int signal;
  /* The associated siginfo, presumably as fetched via
     PTRACE_GETSIGINFO — confirm at the enqueue sites.  */
  siginfo_t info;
  /* Previously queued entry.  */
  struct pending_signals *prev;
};
611cb4a5 322
bd99dc85
PA
323/* The read/write ends of the pipe registered as waitable file in the
324 event loop. */
325static int linux_event_pipe[2] = { -1, -1 };
326
327/* True if we're currently in async mode. */
328#define target_is_async_p() (linux_event_pipe[0] != -1)
329
02fc4de7 330static void send_sigstop (struct lwp_info *lwp);
fa96cb38 331static void wait_for_sigstop (void);
bd99dc85 332
d0722149
DE
333/* Return non-zero if HEADER is a 64-bit ELF file. */
334
/* Return non-zero if HEADER describes a 64-bit ELF image, zero if it
   is ELF but not 64-bit, and -1 if it is not ELF at all.  On ELF
   input, store the e_machine field in *MACHINE; otherwise store
   EM_NONE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (memcmp (header->e_ident, ELFMAG, SELFMAG) != 0)
    {
      /* Not an ELF image.  */
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
350
351/* Return non-zero if FILE is a 64-bit ELF file,
352 zero if the file is not a 64-bit ELF file,
353 and -1 if the file is not accessible or doesn't exist. */
354
be07f1a2 355static int
214d508e 356elf_64_file_p (const char *file, unsigned int *machine)
d0722149 357{
957f3f49 358 Elf64_Ehdr header;
d0722149
DE
359 int fd;
360
361 fd = open (file, O_RDONLY);
362 if (fd < 0)
363 return -1;
364
365 if (read (fd, &header, sizeof (header)) != sizeof (header))
366 {
367 close (fd);
368 return 0;
369 }
370 close (fd);
371
214d508e 372 return elf_64_header_p (&header, machine);
d0722149
DE
373}
374
be07f1a2
PA
/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file, zero if it is not, and -1 if it is
   not accessible or doesn't exist.  */
377
378int
214d508e 379linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 380{
d8d2a3ee 381 char file[PATH_MAX];
be07f1a2
PA
382
383 sprintf (file, "/proc/%d/exe", pid);
214d508e 384 return elf_64_file_p (file, machine);
be07f1a2
PA
385}
386
bd99dc85
PA
387static void
388delete_lwp (struct lwp_info *lwp)
389{
fa96cb38
PA
390 struct thread_info *thr = get_lwp_thread (lwp);
391
392 if (debug_threads)
393 debug_printf ("deleting %ld\n", lwpid_of (thr));
394
395 remove_thread (thr);
aa5ca48f 396 free (lwp->arch_private);
bd99dc85
PA
397 free (lwp);
398}
399
95954743
PA
400/* Add a process to the common process list, and set its private
401 data. */
402
403static struct process_info *
404linux_add_process (int pid, int attached)
405{
406 struct process_info *proc;
407
95954743 408 proc = add_process (pid, attached);
8d749320 409 proc->priv = XCNEW (struct process_info_private);
95954743 410
aa5ca48f 411 if (the_low_target.new_process != NULL)
fe978cb0 412 proc->priv->arch_private = the_low_target.new_process ();
aa5ca48f 413
95954743
PA
414 return proc;
415}
416
582511be
PA
417static CORE_ADDR get_pc (struct lwp_info *lwp);
418
94585166
DB
419/* Implement the arch_setup target_ops method. */
420
static void
linux_arch_setup (void)
{
  /* Delegate to the architecture-specific low target hook.  */
  the_low_target.arch_setup ();
}
426
427/* Call the target arch_setup function on THREAD. */
428
429static void
430linux_arch_setup_thread (struct thread_info *thread)
431{
432 struct thread_info *saved_thread;
433
434 saved_thread = current_thread;
435 current_thread = thread;
436
437 linux_arch_setup ();
438
439 current_thread = saved_thread;
440}
441
442/* Handle a GNU/Linux extended wait response. If we see a clone,
443 fork, or vfork event, we need to add the new LWP to our list
444 (and return 0 so as not to report the trap to higher layers).
445 If we see an exec event, we will modify ORIG_EVENT_LWP to point
446 to a new LWP representing the new program. */
0d62e5e8 447
de0d863e 448static int
94585166 449handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
24a09b5f 450{
94585166 451 struct lwp_info *event_lwp = *orig_event_lwp;
89a5711c 452 int event = linux_ptrace_get_extended_event (wstat);
de0d863e 453 struct thread_info *event_thr = get_lwp_thread (event_lwp);
54a0b537 454 struct lwp_info *new_lwp;
24a09b5f 455
c269dbdb
DB
456 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
457 || (event == PTRACE_EVENT_CLONE))
24a09b5f 458 {
95954743 459 ptid_t ptid;
24a09b5f 460 unsigned long new_pid;
05044653 461 int ret, status;
24a09b5f 462
de0d863e 463 /* Get the pid of the new lwp. */
d86d4aaf 464 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
56f7af9c 465 &new_pid);
24a09b5f
DJ
466
467 /* If we haven't already seen the new PID stop, wait for it now. */
05044653 468 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
24a09b5f
DJ
469 {
470 /* The new child has a pending SIGSTOP. We can't affect it until it
471 hits the SIGSTOP, but we're already attached. */
472
97438e3f 473 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
474
475 if (ret == -1)
476 perror_with_name ("waiting for new child");
477 else if (ret != new_pid)
478 warning ("wait returned unexpected PID %d", ret);
da5898ce 479 else if (!WIFSTOPPED (status))
24a09b5f
DJ
480 warning ("wait returned unexpected status 0x%x", status);
481 }
482
c269dbdb 483 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
de0d863e
DB
484 {
485 struct process_info *parent_proc;
486 struct process_info *child_proc;
487 struct lwp_info *child_lwp;
bfacd19d 488 struct thread_info *child_thr;
de0d863e
DB
489 struct target_desc *tdesc;
490
491 ptid = ptid_build (new_pid, new_pid, 0);
492
493 if (debug_threads)
494 {
495 debug_printf ("HEW: Got fork event from LWP %ld, "
496 "new child is %d\n",
497 ptid_get_lwp (ptid_of (event_thr)),
498 ptid_get_pid (ptid));
499 }
500
501 /* Add the new process to the tables and clone the breakpoint
502 lists of the parent. We need to do this even if the new process
503 will be detached, since we will need the process object and the
504 breakpoints to remove any breakpoints from memory when we
505 detach, and the client side will access registers. */
506 child_proc = linux_add_process (new_pid, 0);
507 gdb_assert (child_proc != NULL);
508 child_lwp = add_lwp (ptid);
509 gdb_assert (child_lwp != NULL);
510 child_lwp->stopped = 1;
bfacd19d
DB
511 child_lwp->must_set_ptrace_flags = 1;
512 child_lwp->status_pending_p = 0;
513 child_thr = get_lwp_thread (child_lwp);
514 child_thr->last_resume_kind = resume_stop;
998d452a
PA
515 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
516
863d01bd
PA
517 /* If we're suspending all threads, leave this one suspended
518 too. */
519 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
520 {
521 if (debug_threads)
522 debug_printf ("HEW: leaving child suspended\n");
523 child_lwp->suspended = 1;
524 }
525
de0d863e
DB
526 parent_proc = get_thread_process (event_thr);
527 child_proc->attached = parent_proc->attached;
528 clone_all_breakpoints (&child_proc->breakpoints,
529 &child_proc->raw_breakpoints,
530 parent_proc->breakpoints);
531
8d749320 532 tdesc = XNEW (struct target_desc);
de0d863e
DB
533 copy_target_description (tdesc, parent_proc->tdesc);
534 child_proc->tdesc = tdesc;
de0d863e 535
3a8a0396
DB
536 /* Clone arch-specific process data. */
537 if (the_low_target.new_fork != NULL)
538 the_low_target.new_fork (parent_proc, child_proc);
539
de0d863e 540 /* Save fork info in the parent thread. */
c269dbdb
DB
541 if (event == PTRACE_EVENT_FORK)
542 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
543 else if (event == PTRACE_EVENT_VFORK)
544 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
545
de0d863e 546 event_lwp->waitstatus.value.related_pid = ptid;
c269dbdb 547
de0d863e
DB
548 /* The status_pending field contains bits denoting the
549 extended event, so when the pending event is handled,
550 the handler will look at lwp->waitstatus. */
551 event_lwp->status_pending_p = 1;
552 event_lwp->status_pending = wstat;
553
554 /* Report the event. */
555 return 0;
556 }
557
fa96cb38
PA
558 if (debug_threads)
559 debug_printf ("HEW: Got clone event "
560 "from LWP %ld, new child is LWP %ld\n",
561 lwpid_of (event_thr), new_pid);
562
d86d4aaf 563 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
b3312d80 564 new_lwp = add_lwp (ptid);
24a09b5f 565
e27d73f6
DE
566 /* Either we're going to immediately resume the new thread
567 or leave it stopped. linux_resume_one_lwp is a nop if it
568 thinks the thread is currently running, so set this first
569 before calling linux_resume_one_lwp. */
570 new_lwp->stopped = 1;
571
bde24c0a
PA
572 /* If we're suspending all threads, leave this one suspended
573 too. */
574 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
575 new_lwp->suspended = 1;
576
da5898ce
DJ
577 /* Normally we will get the pending SIGSTOP. But in some cases
578 we might get another signal delivered to the group first.
f21cc1a2 579 If we do get another signal, be sure not to lose it. */
20ba1ce6 580 if (WSTOPSIG (status) != SIGSTOP)
da5898ce 581 {
54a0b537 582 new_lwp->stop_expected = 1;
20ba1ce6
PA
583 new_lwp->status_pending_p = 1;
584 new_lwp->status_pending = status;
da5898ce 585 }
de0d863e
DB
586
587 /* Don't report the event. */
588 return 1;
24a09b5f 589 }
c269dbdb
DB
590 else if (event == PTRACE_EVENT_VFORK_DONE)
591 {
592 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
593
594 /* Report the event. */
595 return 0;
596 }
94585166
DB
597 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
598 {
599 struct process_info *proc;
600 ptid_t event_ptid;
601 pid_t event_pid;
602
603 if (debug_threads)
604 {
605 debug_printf ("HEW: Got exec event from LWP %ld\n",
606 lwpid_of (event_thr));
607 }
608
609 /* Get the event ptid. */
610 event_ptid = ptid_of (event_thr);
611 event_pid = ptid_get_pid (event_ptid);
612
613 /* Delete the execing process and all its threads. */
614 proc = get_thread_process (event_thr);
615 linux_mourn (proc);
616 current_thread = NULL;
617
618 /* Create a new process/lwp/thread. */
619 proc = linux_add_process (event_pid, 0);
620 event_lwp = add_lwp (event_ptid);
621 event_thr = get_lwp_thread (event_lwp);
622 gdb_assert (current_thread == event_thr);
623 linux_arch_setup_thread (event_thr);
624
625 /* Set the event status. */
626 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
627 event_lwp->waitstatus.value.execd_pathname
628 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
629
630 /* Mark the exec status as pending. */
631 event_lwp->stopped = 1;
632 event_lwp->status_pending_p = 1;
633 event_lwp->status_pending = wstat;
634 event_thr->last_resume_kind = resume_continue;
635 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
636
637 /* Report the event. */
638 *orig_event_lwp = event_lwp;
639 return 0;
640 }
de0d863e
DB
641
642 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
24a09b5f
DJ
643}
644
d50171e4
PA
645/* Return the PC as read from the regcache of LWP, without any
646 adjustment. */
647
648static CORE_ADDR
649get_pc (struct lwp_info *lwp)
650{
0bfdf32f 651 struct thread_info *saved_thread;
d50171e4
PA
652 struct regcache *regcache;
653 CORE_ADDR pc;
654
655 if (the_low_target.get_pc == NULL)
656 return 0;
657
0bfdf32f
GB
658 saved_thread = current_thread;
659 current_thread = get_lwp_thread (lwp);
d50171e4 660
0bfdf32f 661 regcache = get_thread_regcache (current_thread, 1);
d50171e4
PA
662 pc = (*the_low_target.get_pc) (regcache);
663
664 if (debug_threads)
87ce2a04 665 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 666
0bfdf32f 667 current_thread = saved_thread;
d50171e4
PA
668 return pc;
669}
670
671/* This function should only be called if LWP got a SIGTRAP.
0d62e5e8
DJ
672 The SIGTRAP could mean several things.
673
674 On i386, where decr_pc_after_break is non-zero:
582511be
PA
675
676 If we were single-stepping this process using PTRACE_SINGLESTEP, we
677 will get only the one SIGTRAP. The value of $eip will be the next
678 instruction. If the instruction we stepped over was a breakpoint,
679 we need to decrement the PC.
680
0d62e5e8
DJ
681 If we continue the process using PTRACE_CONT, we will get a
682 SIGTRAP when we hit a breakpoint. The value of $eip will be
683 the instruction after the breakpoint (i.e. needs to be
684 decremented). If we report the SIGTRAP to GDB, we must also
582511be 685 report the undecremented PC. If the breakpoint is removed, we
0d62e5e8
DJ
686 must resume at the decremented PC.
687
582511be
PA
688 On a non-decr_pc_after_break machine with hardware or kernel
689 single-step:
690
691 If we either single-step a breakpoint instruction, or continue and
692 hit a breakpoint instruction, our PC will point at the breakpoint
0d62e5e8
DJ
693 instruction. */
694
582511be
PA
static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  /* SW_BREAKPOINT_PC is where a software breakpoint instruction
     would live if this stop was caused by one (accounting for the
     target's decr_pc_after_break adjustment).  */
  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  /* Prefer asking the kernel: the siginfo's si_code distinguishes
     software breakpoints, hardware breakpoints/watchpoints, and
     single-step traps.  */
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by software breakpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by hardware "
				"breakpoint/watchpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Hardware breakpoints do not need a PC adjustment.  */
	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by trace\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Record the reason but fall through to return 0; a
		 single-step stop is not a breakpoint stop.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* No reliable siginfo available: infer the cause by inspecting
     what is installed at the adjusted PC.  */

  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}
ce3a066d 824
b3312d80 825static struct lwp_info *
95954743 826add_lwp (ptid_t ptid)
611cb4a5 827{
54a0b537 828 struct lwp_info *lwp;
0d62e5e8 829
8d749320 830 lwp = XCNEW (struct lwp_info);
00db26fa
PA
831
832 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
0d62e5e8 833
aa5ca48f 834 if (the_low_target.new_thread != NULL)
34c703da 835 the_low_target.new_thread (lwp);
aa5ca48f 836
f7667f0d 837 lwp->thread = add_thread (ptid, lwp);
0d62e5e8 838
54a0b537 839 return lwp;
0d62e5e8 840}
611cb4a5 841
da6d8c04
DJ
842/* Start an inferior process and returns its pid.
843 ALLARGS is a vector of program-name and args. */
844
ce3a066d
DJ
static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  /* Optionally disable ASLR for the child; undone via do_cleanups
     below once the fork has happened.  */
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* No-MMU systems cannot fork; vfork is the only option.  */
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child: set ourselves up to be traced and exec PROGRAM.  */
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      /* Try PROGRAM as given; fall back to a PATH search only when
	 the plain name does not exist.  */
      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  /* Parent: restore our own personality settings and record the new
     process.  */
  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  /* PTRACE_SETOPTIONS must wait until the child's first stop.  */
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
908
8784d563
PA
909/* Attach to an inferior process. Returns 0 on success, ERRNO on
910 error. */
da6d8c04 911
7ae1a6a6
PA
int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
993
8784d563
PA
994/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
995 already attached. Returns true if a new LWP is found, false
996 otherwise. */
997
998static int
999attach_proc_task_lwp_callback (ptid_t ptid)
1000{
1001 /* Is this a new thread? */
1002 if (find_thread_ptid (ptid) == NULL)
1003 {
1004 int lwpid = ptid_get_lwp (ptid);
1005 int err;
1006
1007 if (debug_threads)
1008 debug_printf ("Found new lwp %d\n", lwpid);
1009
1010 err = linux_attach_lwp (ptid);
1011
1012 /* Be quiet if we simply raced with the thread exiting. EPERM
1013 is returned if the thread's task still exists, and is marked
1014 as exited or zombie, as well as other conditions, so in that
1015 case, confirm the status in /proc/PID/status. */
1016 if (err == ESRCH
1017 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1018 {
1019 if (debug_threads)
1020 {
1021 debug_printf ("Cannot attach to lwp %d: "
1022 "thread is gone (%d: %s)\n",
1023 lwpid, err, strerror (err));
1024 }
1025 }
1026 else if (err != 0)
1027 {
1028 warning (_("Cannot attach to lwp %d: %s"),
1029 lwpid,
1030 linux_ptrace_attach_fail_reason_string (ptid, err));
1031 }
1032
1033 return 1;
1034 }
1035 return 0;
1036}
1037
e3deef73
LM
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  Returns 0 on success; on failure, calls error ()
   (which does not return).  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  /* Second argument nonzero: this process was attached, not spawned.  */
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}
1078
/* Bookkeeping for second_thread_of_pid_p: counts threads belonging to
   one process while iterating over all threads.  */

struct counter
{
  int pid;	/* The process whose threads are being counted.  */
  int count;	/* Number of matching threads seen so far.  */
};
1084
1085static int
1086second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1087{
9a3c8263 1088 struct counter *counter = (struct counter *) args;
95954743
PA
1089
1090 if (ptid_get_pid (entry->id) == counter->pid)
1091 {
1092 if (++counter->count > 1)
1093 return 1;
1094 }
d61ddec4 1095
da6d8c04
DJ
1096 return 0;
1097}
1098
95954743 1099static int
fa96cb38 1100last_thread_of_process_p (int pid)
95954743 1101{
95954743 1102 struct counter counter = { pid , 0 };
da6d8c04 1103
95954743
PA
1104 return (find_inferior (&all_threads,
1105 second_thread_of_pid_p, &counter) == NULL);
1106}
1107
da84f473
PA
/* Kill LWP.  Sends SIGKILL first, then PTRACE_KILL as a fallback; see
   the comment below for why both are used.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  /* Clear errno so the debug output below reports the result of this
     call, not a stale value.  */
  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}
1152
e76126e8
PA
/* Kill LWP and wait for it to die.  Loops until the kernel reports
   the LWP as no longer stopped (i.e., actually dead or reaped).  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      /* ECHILD from a plain waitpid can mean the LWP is a clone that
	 must be waited for with __WCLONE; retry that way.  */
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1195
da84f473
PA
/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  ARGS points to the pid of the process whose
   LWPs are being killed.  Always returns 0 so iteration continues.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  /* Skip threads belonging to other processes.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}
1225
95954743
PA
/* Kill process PID and all its LWPs.  Returns -1 if the process is
   unknown, 0 on success.  */

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback , &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1262
9b224c5e
PA
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   the host signal number to deliver on detach, or 0 if no signal
   should be passed.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Honor GDB's pass/nopass signal settings, when known.  */
  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it. "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1342
95954743
PA
/* Callback for find_inferior.  Detach from one LWP of the process
   whose pid is pointed to by ARGS.  Always returns 0 so iteration
   continues.  */

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  /* Skip threads of other processes.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}
1383
95954743
PA
/* Detach from process PID and all its LWPs.  Returns -1 if the
   process is unknown, 0 on success.  */

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1420
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct process_info *proc_info = (struct process_info *) proc;

  /* Only delete LWPs that belong to PROC.  */
  if (pid_of (thr) != pid_of (proc_info))
    return 0;

  delete_lwp (get_thread_lwp (thr));
  return 0;
}
1435
8336d594
PA
/* Forget everything about PROCESS: its LWPs, thread_db state, and
   per-process private data.  Called once the process is gone.  */

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}
1455
/* Wait until process PID has fully exited (reaping its exit or
   termination status), or until there is nothing left to wait for
   (ECHILD).  */

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* Only inspect STATUS when waitpid actually filled it in; on
       failure (ret == -1) STATUS is uninitialized and testing it
       would be undefined behavior.  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}
1467
6ad8ae5c 1468/* Return nonzero if the given thread is still alive. */
0d62e5e8 1469static int
95954743 1470linux_thread_alive (ptid_t ptid)
0d62e5e8 1471{
95954743
PA
1472 struct lwp_info *lwp = find_lwp_pid (ptid);
1473
1474 /* We assume we always know if a thread exits. If a whole process
1475 exited but we still haven't been able to report it to GDB, we'll
1476 hold on to the last lwp of the dead process. */
1477 if (lwp != NULL)
00db26fa 1478 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1479 else
1480 return 0;
1481}
1482
582511be
PA
/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  As a side effect, may clear the LWP's status_pending_p when
   the pending breakpoint event is found to be stale.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* Temporarily switch the current thread; some of the checks
	 below operate on the current thread.  */
      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      /* Without siginfo-based stop reasons, re-check that the
	 breakpoint that caused the stop still exists.  */
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1556
/* Return 1 if this lwp has an interesting status pending.  ARG is a
   ptid filter; only threads matching it are considered.  If a pending
   status turns out to be stale, the LWP is re-resumed here.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      /* The pending event went stale (see
	 thread_still_has_status_pending_p); resume the LWP the way it
	 was previously resumed.  */
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1579
95954743
PA
1580static int
1581same_lwp (struct inferior_list_entry *entry, void *data)
1582{
1583 ptid_t ptid = *(ptid_t *) data;
1584 int lwp;
1585
1586 if (ptid_get_lwp (ptid) != 0)
1587 lwp = ptid_get_lwp (ptid);
1588 else
1589 lwp = ptid_get_pid (ptid);
1590
1591 if (ptid_get_lwp (entry->id) == lwp)
1592 return 1;
1593
1594 return 0;
1595}
1596
1597struct lwp_info *
1598find_lwp_pid (ptid_t ptid)
1599{
d86d4aaf
DE
1600 struct inferior_list_entry *thread
1601 = find_inferior (&all_threads, same_lwp, &ptid);
1602
1603 if (thread == NULL)
1604 return NULL;
1605
1606 return get_thread_lwp ((struct thread_info *) thread);
95954743
PA
1607}
1608
fa96cb38 1609/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1610
fa96cb38
PA
1611static int
1612num_lwps (int pid)
1613{
1614 struct inferior_list_entry *inf, *tmp;
1615 int count = 0;
0d62e5e8 1616
fa96cb38 1617 ALL_INFERIORS (&all_threads, inf, tmp)
24a09b5f 1618 {
fa96cb38
PA
1619 if (ptid_get_pid (inf->id) == pid)
1620 count++;
24a09b5f 1621 }
3aee8918 1622
fa96cb38
PA
1623 return count;
1624}
d61ddec4 1625
6d4ee8c6
GB
/* The arguments passed to iterate_over_lwps, bundled so they can
   travel through find_inferior's single void * argument.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};
1639
1640/* Callback for find_inferior used by iterate_over_lwps to filter
1641 calls to the callback supplied to that function. Returning a
1642 nonzero value causes find_inferiors to stop iterating and return
1643 the current inferior_list_entry. Returning zero indicates that
1644 find_inferiors should continue iterating. */
1645
1646static int
1647iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1648{
1649 struct iterate_over_lwps_args *args
1650 = (struct iterate_over_lwps_args *) args_p;
1651
1652 if (ptid_match (entry->id, args->filter))
1653 {
1654 struct thread_info *thr = (struct thread_info *) entry;
1655 struct lwp_info *lwp = get_thread_lwp (thr);
1656
1657 return (*args->callback) (lwp, args->data);
1658 }
1659
1660 return 0;
1661}
1662
1663/* See nat/linux-nat.h. */
1664
1665struct lwp_info *
1666iterate_over_lwps (ptid_t filter,
1667 iterate_over_lwps_ftype callback,
1668 void *data)
1669{
1670 struct iterate_over_lwps_args args = {filter, callback, data};
1671 struct inferior_list_entry *entry;
1672
1673 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1674 if (entry == NULL)
1675 return NULL;
1676
1677 return get_thread_lwp ((struct thread_info *) entry);
1678}
1679
fa96cb38
PA
/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     it's tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}
c3adc08c 1745
fa96cb38
PA
1746/* Callback for `find_inferior'. Returns the first LWP that is not
1747 stopped. ARG is a PTID filter. */
d50171e4 1748
fa96cb38
PA
1749static int
1750not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1751{
1752 struct thread_info *thr = (struct thread_info *) entry;
1753 struct lwp_info *lwp;
1754 ptid_t filter = *(ptid_t *) arg;
47c0c975 1755
fa96cb38
PA
1756 if (!ptid_match (ptid_of (thr), filter))
1757 return 0;
bd99dc85 1758
fa96cb38
PA
1759 lwp = get_thread_lwp (thr);
1760 if (!lwp->stopped)
1761 return 1;
1762
1763 return 0;
0d62e5e8 1764}
611cb4a5 1765
863d01bd
PA
/* Increment LWP's suspend count.  A suspiciously deep count usually
   means unbalanced inc/decr calls, so warn about it in debug mode.  */

static void
lwp_suspended_inc (struct lwp_info *lwp)
{
  lwp->suspended++;

  if (debug_threads && lwp->suspended > 4)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      debug_printf ("LWP %ld has a suspiciously high suspend count,"
		    " suspended=%d\n", lwpid_of (thread), lwp->suspended);
    }
}
1781
/* Decrement LWP's suspend count.  A negative count indicates an
   unbalanced unsuspend, which is an internal error.  */

static void
lwp_suspended_decr (struct lwp_info *lwp)
{
  lwp->suspended--;

  if (lwp->suspended < 0)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      internal_error (__FILE__, __LINE__,
		      "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }
}
1798
219f2f23
PA
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Balance the lwp_suspended_inc above.  */
  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}
1846
fa593d66
PA
/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  On success, *STATUS is filled in with the
   collection state (see fast_tracepoint_collecting).  Returns 0 if
   the target has no way to identify the thread's area.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}
1869
/* Check whether LWP is stopped inside a fast tracepoint jump pad, and
   if so arrange for it to be moved out before its stop is reported.
   WSTAT points at the LWP's wait status, or is NULL.  Returns 1 if
   the LWP must keep running (to exit the jump pad), 0 otherwise.

   The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  /* Temporarily make LWP's thread current; restored before every
     return path below.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}
1996
1997/* Enqueue one signal in the "signals to report later when out of the
1998 jump pad" list. */
1999
2000static void
2001enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2002{
2003 struct pending_signals *p_sig;
d86d4aaf 2004 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
2005
2006 if (debug_threads)
87ce2a04 2007 debug_printf ("Deferring signal %d for LWP %ld.\n",
d86d4aaf 2008 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2009
2010 if (debug_threads)
2011 {
2012 struct pending_signals *sig;
2013
2014 for (sig = lwp->pending_signals_to_report;
2015 sig != NULL;
2016 sig = sig->prev)
87ce2a04
DE
2017 debug_printf (" Already queued %d\n",
2018 sig->signal);
fa593d66 2019
87ce2a04 2020 debug_printf (" (no more currently queued signals)\n");
fa593d66
PA
2021 }
2022
1a981360
PA
2023 /* Don't enqueue non-RT signals if they are already in the deferred
2024 queue. (SIGSTOP being the easiest signal to see ending up here
2025 twice) */
2026 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2027 {
2028 struct pending_signals *sig;
2029
2030 for (sig = lwp->pending_signals_to_report;
2031 sig != NULL;
2032 sig = sig->prev)
2033 {
2034 if (sig->signal == WSTOPSIG (*wstat))
2035 {
2036 if (debug_threads)
87ce2a04
DE
2037 debug_printf ("Not requeuing already queued non-RT signal %d"
2038 " for LWP %ld\n",
2039 sig->signal,
d86d4aaf 2040 lwpid_of (thread));
1a981360
PA
2041 return;
2042 }
2043 }
2044 }
2045
8d749320 2046 p_sig = XCNEW (struct pending_signals);
fa593d66
PA
2047 p_sig->prev = lwp->pending_signals_to_report;
2048 p_sig->signal = WSTOPSIG (*wstat);
8d749320 2049
d86d4aaf 2050 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 2051 &p_sig->info);
fa593d66
PA
2052
2053 lwp->pending_signals_to_report = p_sig;
2054}
2055
2056/* Dequeue one signal from the "signals to report later when out of
2057 the jump pad" list. */
2058
2059static int
2060dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2061{
d86d4aaf
DE
2062 struct thread_info *thread = get_lwp_thread (lwp);
2063
fa593d66
PA
2064 if (lwp->pending_signals_to_report != NULL)
2065 {
2066 struct pending_signals **p_sig;
2067
2068 p_sig = &lwp->pending_signals_to_report;
2069 while ((*p_sig)->prev != NULL)
2070 p_sig = &(*p_sig)->prev;
2071
2072 *wstat = W_STOPCODE ((*p_sig)->signal);
2073 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 2074 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 2075 &(*p_sig)->info);
fa593d66
PA
2076 free (*p_sig);
2077 *p_sig = NULL;
2078
2079 if (debug_threads)
87ce2a04 2080 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
d86d4aaf 2081 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2082
2083 if (debug_threads)
2084 {
2085 struct pending_signals *sig;
2086
2087 for (sig = lwp->pending_signals_to_report;
2088 sig != NULL;
2089 sig = sig->prev)
87ce2a04
DE
2090 debug_printf (" Still queued %d\n",
2091 sig->signal);
fa593d66 2092
87ce2a04 2093 debug_printf (" (no more queued signals)\n");
fa593d66
PA
2094 }
2095
2096 return 1;
2097 }
2098
2099 return 0;
2100}
2101
582511be
PA
2102/* Fetch the possibly triggered data watchpoint info and store it in
2103 CHILD.
d50171e4 2104
582511be
PA
2105 On some archs, like x86, that use debug registers to set
2106 watchpoints, it's possible that the way to know which watched
2107 address trapped, is to check the register that is used to select
2108 which address to watch. Problem is, between setting the watchpoint
2109 and reading back which data address trapped, the user may change
2110 the set of watchpoints, and, as a consequence, GDB changes the
2111 debug registers in the inferior. To avoid reading back a stale
2112 stopped-data-address when that happens, we cache in LP the fact
2113 that a watchpoint trapped, and the corresponding data address, as
2114 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2115 registers meanwhile, we have the cached data we can rely on. */
d50171e4 2116
582511be
PA
2117static int
2118check_stopped_by_watchpoint (struct lwp_info *child)
2119{
2120 if (the_low_target.stopped_by_watchpoint != NULL)
d50171e4 2121 {
582511be 2122 struct thread_info *saved_thread;
d50171e4 2123
582511be
PA
2124 saved_thread = current_thread;
2125 current_thread = get_lwp_thread (child);
2126
2127 if (the_low_target.stopped_by_watchpoint ())
d50171e4 2128 {
15c66dd6 2129 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
582511be
PA
2130
2131 if (the_low_target.stopped_data_address != NULL)
2132 child->stopped_data_address
2133 = the_low_target.stopped_data_address ();
2134 else
2135 child->stopped_data_address = 0;
d50171e4
PA
2136 }
2137
0bfdf32f 2138 current_thread = saved_thread;
d50171e4
PA
2139 }
2140
15c66dd6 2141 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
c4d9ceb6
YQ
2142}
2143
de0d863e
DB
2144/* Return the ptrace options that we want to try to enable. */
2145
2146static int
2147linux_low_ptrace_options (int attached)
2148{
2149 int options = 0;
2150
2151 if (!attached)
2152 options |= PTRACE_O_EXITKILL;
2153
2154 if (report_fork_events)
2155 options |= PTRACE_O_TRACEFORK;
2156
c269dbdb
DB
2157 if (report_vfork_events)
2158 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2159
94585166
DB
2160 if (report_exec_events)
2161 options |= PTRACE_O_TRACEEXEC;
2162
de0d863e
DB
2163 return options;
2164}
2165
/* Do low-level handling of the event, and check if we should go on
   and pass it to caller code.  Return the affected lwp if we are, or
   NULL otherwise.  */

static struct lwp_info *
linux_low_filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (pid_to_ptid (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.

     But note the case of a non-leader thread exec'ing after the
     leader having exited, and gone from our lists (because
     check_zombie_leaders deleted it).  The non-leader thread
     changes its tid to the tgid.  */

  if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
    {
      ptid_t child_ptid;

      /* A multi-thread exec after we had seen the leader exiting.  */
      if (debug_threads)
	{
	  debug_printf ("LLW: Re-adding thread group leader LWP %d"
			"after exec.\n", lwpid);
	}

      child_ptid = ptid_build (lwpid, lwpid, 0);
      child = add_lwp (child_ptid);
      child->stopped = 1;
      current_thread = child->thread;
    }

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      if (debug_threads)
	debug_printf ("LLFE: %d exited.\n", lwpid);
      if (num_lwps (pid_of (thread)) > 1)
	{

	  /* If there is at least one more LWP, then the exit signal was
	     not the end of the debugged application and should be
	     ignored.  */
	  delete_lwp (child);
	  return NULL;
	}
      else
	{
	  /* This was the last lwp in the process.  Since events are
	     serialized to GDB core, and we can't report this one
	     right now, but GDB core and the other target layers will
	     want to be notified about the exit code/signal, leave the
	     status pending for the next time we're able to report
	     it.  */
	  mark_lwp_dead (child, wstat);
	  return child;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  /* NOTE(review): the WIFSTOPPED conditions below are redundant after
     the assert above; presumably kept as belt-and-braces.  */
  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      linux_arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return child;
	    }
	}
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Be careful to not overwrite stop_pc until
     check_stopped_by_breakpoint is called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return NULL;
	}
    }

  /* Check first whether this was a SW/HW breakpoint before checking
     watchpoints, because at least s390 can't tell the data address of
     hardware watchpoint hits, and returns stopped-by-watchpoint as
     long as there's a watchpoint set.  */
  if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
    {
      if (check_stopped_by_breakpoint (child))
	have_stop_pc = 1;
    }

  /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
     or hardware watchpoint.  Check which is which if we got
     TARGET_STOPPED_BY_HW_BREAKPOINT.  Likewise, we may have single
     stepped an instruction that triggered a watchpoint.  In that
     case, on some architectures (such as x86), instead of
     TRAP_HWBKPT, si_code indicates TRAP_TRACE, and we need to check
     the debug registers separately.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && child->stop_reason != TARGET_STOPPED_BY_SW_BREAKPOINT)
    check_stopped_by_watchpoint (child);

  /* check_stopped_by_breakpoint may already have rewound and recorded
     the PC; only read it here if it hasn't.  */
  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  if (debug_threads)
	    debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
			  target_pid_to_str (ptid_of (thread)));
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  if (debug_threads)
	    debug_printf ("LLW: SIGSTOP caught for %s "
			  "while stopping threads.\n",
			  target_pid_to_str (ptid_of (thread)));
	  return NULL;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  if (debug_threads)
	    debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
			  child->stepping ? "step" : "continue",
			  target_pid_to_str (ptid_of (thread)));

	  linux_resume_one_lwp (child, child->stepping, 0, NULL);
	  return NULL;
	}
    }

  /* Leave the event pending; the caller decides when to report it.  */
  child->status_pending_p = 1;
  child->status_pending = wstat;
  return child;
}
2374
20ba1ce6
PA
2375/* Resume LWPs that are currently stopped without any pending status
2376 to report, but are resumed from the core's perspective. */
2377
2378static void
2379resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2380{
2381 struct thread_info *thread = (struct thread_info *) entry;
2382 struct lwp_info *lp = get_thread_lwp (thread);
2383
2384 if (lp->stopped
863d01bd 2385 && !lp->suspended
20ba1ce6
PA
2386 && !lp->status_pending_p
2387 && thread->last_resume_kind != resume_stop
2388 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2389 {
2390 int step = thread->last_resume_kind == resume_step;
2391
2392 if (debug_threads)
2393 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2394 target_pid_to_str (ptid_of (thread)),
2395 paddress (lp->stop_pc),
2396 step);
2397
2398 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2399 }
2400}
2401
/* Wait for an event from child(ren) WAIT_PTID, and return any that
   match FILTER_PTID (leaving others pending).  The PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTAT.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   was found.  Return the PID of the stopped child otherwise.  */

static int
linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
			       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

  /* We come back here after sigsuspend wakes us up with SIGCHLD.  */
 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
    {
      event_thread = (struct thread_info *)
	find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
      if (event_thread != NULL)
	event_child = get_thread_lwp (event_thread);
      if (debug_threads && event_thread)
	debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
    }
  else if (!ptid_equal (filter_ptid, null_ptid))
    {
      /* Waiting for one specific LWP.  */
      requested_child = find_lwp_pid (filter_ptid);

      /* If the LWP stopped midway through a fast tracepoint collect,
	 defer its pending signal and let it finish the collect first.  */
      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && requested_child->collecting_fast_tracepoint)
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  linux_resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error (__FILE__, __LINE__,
			  "requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      if (debug_threads)
	debug_printf ("Got an event from pending child %ld (%04x)\n",
		      lwpid_of (event_thread), event_child->status_pending);
      /* Consume the pending status and make its thread current.  */
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_thread = event_thread;
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      if (debug_threads)
	debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
		      ret, errno ? strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  if (debug_threads)
	    {
	      debug_printf ("LLW: waitpid %ld received %s\n",
			    (long) ret, status_to_str (*wstatp));
	    }

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  linux_low_filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_inferior (&all_threads, resume_stopped_resumed_lwps);

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = (struct thread_info *)
	find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if ((find_inferior (&all_threads,
			  not_stopped_callback,
			  &wait_ptid) == NULL))
	{
	  if (debug_threads)
	    debug_printf ("LLW: exit (no unwaited-for LWP)\n");
	  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  if (debug_threads)
	    debug_printf ("WNOHANG set, no event found\n");

	  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      if (debug_threads)
	debug_printf ("sigsuspend'ing\n");

      /* Atomically unblock signals and wait; SIGCHLD cannot be lost
	 between the waitpid above and this suspend.  */
      sigsuspend (&prev_mask);
      sigprocmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);

  current_thread = event_thread;

  /* Check for thread exit.  */
  if (! WIFSTOPPED (*wstatp))
    {
      gdb_assert (last_thread_of_process_p (pid_of (event_thread)));

      if (debug_threads)
	debug_printf ("LWP %d is the last lwp of process.  "
		      "Process %ld exiting.\n",
		      pid_of (event_thread), lwpid_of (event_thread));
      return lwpid_of (event_thread);
    }

  return lwpid_of (event_thread);
}
2610
2611/* Wait for an event from child(ren) PTID. PTIDs can be:
2612 minus_one_ptid, to specify any child; a pid PTID, specifying all
2613 lwps of a thread group; or a PTID representing a single lwp. Store
2614 the stop status through the status pointer WSTAT. OPTIONS is
2615 passed to the waitpid call. Return 0 if no event was found and
2616 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2617 was found. Return the PID of the stopped child otherwise. */
2618
2619static int
2620linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2621{
2622 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2623}
2624
6bf5e0ba
PA
2625/* Count the LWP's that have had events. */
2626
2627static int
2628count_events_callback (struct inferior_list_entry *entry, void *data)
2629{
d86d4aaf 2630 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2631 struct lwp_info *lp = get_thread_lwp (thread);
9a3c8263 2632 int *count = (int *) data;
6bf5e0ba
PA
2633
2634 gdb_assert (count != NULL);
2635
582511be 2636 /* Count only resumed LWPs that have an event pending. */
8336d594 2637 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2638 && lp->status_pending_p)
6bf5e0ba
PA
2639 (*count)++;
2640
2641 return 0;
2642}
2643
2644/* Select the LWP (if any) that is currently being single-stepped. */
2645
2646static int
2647select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2648{
d86d4aaf
DE
2649 struct thread_info *thread = (struct thread_info *) entry;
2650 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba 2651
8336d594
PA
2652 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2653 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
2654 && lp->status_pending_p)
2655 return 1;
2656 else
2657 return 0;
2658}
2659
b90fc188 2660/* Select the Nth LWP that has had an event. */
6bf5e0ba
PA
2661
2662static int
2663select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2664{
d86d4aaf 2665 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2666 struct lwp_info *lp = get_thread_lwp (thread);
9a3c8263 2667 int *selector = (int *) data;
6bf5e0ba
PA
2668
2669 gdb_assert (selector != NULL);
2670
582511be 2671 /* Select only resumed LWPs that have an event pending. */
91baf43f 2672 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2673 && lp->status_pending_p)
6bf5e0ba
PA
2674 if ((*selector)-- == 0)
2675 return 1;
2676
2677 return 0;
2678}
2679
6bf5e0ba
PA
2680/* Select one LWP out of those that have events pending. */
2681
2682static void
2683select_event_lwp (struct lwp_info **orig_lp)
2684{
2685 int num_events = 0;
2686 int random_selector;
582511be
PA
2687 struct thread_info *event_thread = NULL;
2688
2689 /* In all-stop, give preference to the LWP that is being
2690 single-stepped. There will be at most one, and it's the LWP that
2691 the core is most interested in. If we didn't do this, then we'd
2692 have to handle pending step SIGTRAPs somehow in case the core
2693 later continues the previously-stepped thread, otherwise we'd
2694 report the pending SIGTRAP, and the core, not having stepped the
2695 thread, wouldn't understand what the trap was for, and therefore
2696 would report it to the user as a random signal. */
2697 if (!non_stop)
6bf5e0ba 2698 {
582511be
PA
2699 event_thread
2700 = (struct thread_info *) find_inferior (&all_threads,
2701 select_singlestep_lwp_callback,
2702 NULL);
2703 if (event_thread != NULL)
2704 {
2705 if (debug_threads)
2706 debug_printf ("SEL: Select single-step %s\n",
2707 target_pid_to_str (ptid_of (event_thread)));
2708 }
6bf5e0ba 2709 }
582511be 2710 if (event_thread == NULL)
6bf5e0ba
PA
2711 {
2712 /* No single-stepping LWP. Select one at random, out of those
b90fc188 2713 which have had events. */
6bf5e0ba 2714
b90fc188 2715 /* First see how many events we have. */
d86d4aaf 2716 find_inferior (&all_threads, count_events_callback, &num_events);
8bf3b159 2717 gdb_assert (num_events > 0);
6bf5e0ba 2718
b90fc188
PA
2719 /* Now randomly pick a LWP out of those that have had
2720 events. */
6bf5e0ba
PA
2721 random_selector = (int)
2722 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2723
2724 if (debug_threads && num_events > 1)
87ce2a04
DE
2725 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2726 num_events, random_selector);
6bf5e0ba 2727
d86d4aaf
DE
2728 event_thread
2729 = (struct thread_info *) find_inferior (&all_threads,
2730 select_event_lwp_callback,
2731 &random_selector);
6bf5e0ba
PA
2732 }
2733
d86d4aaf 2734 if (event_thread != NULL)
6bf5e0ba 2735 {
d86d4aaf
DE
2736 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2737
6bf5e0ba
PA
2738 /* Switch the event LWP. */
2739 *orig_lp = event_lp;
2740 }
2741}
2742
/* Decrement the suspend count of an LWP.  */

static int
unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* EXCEPT marks the one LWP whose count is left untouched.  */
  if (lwp != except)
    lwp_suspended_decr (lwp);

  return 0;
}
2758
2759/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2760 NULL. */
2761
2762static void
2763unsuspend_all_lwps (struct lwp_info *except)
2764{
d86d4aaf 2765 find_inferior (&all_threads, unsuspend_one_lwp, except);
7984d532
PA
2766}
2767
fa593d66
PA
2768static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2769static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2770 void *data);
2771static int lwp_running (struct inferior_list_entry *entry, void *data);
2772static ptid_t linux_wait_1 (ptid_t ptid,
2773 struct target_waitstatus *ourstatus,
2774 int target_options);
2775
2776/* Stabilize threads (move out of jump pads).
2777
2778 If a thread is midway collecting a fast tracepoint, we need to
2779 finish the collection and move it out of the jump pad before
2780 reporting the signal.
2781
2782 This avoids recursion while collecting (when a signal arrives
2783 midway, and the signal handler itself collects), which would trash
2784 the trace buffer. In case the user set a breakpoint in a signal
2785 handler, this avoids the backtrace showing the jump pad, etc..
2786 Most importantly, there are certain things we can't do safely if
2787 threads are stopped in a jump pad (or in its callee's). For
2788 example:
2789
2790 - starting a new trace run. A thread still collecting the
2791 previous run, could trash the trace buffer when resumed. The trace
2792 buffer control structures would have been reset but the thread had
2793 no way to tell. The thread could even midway memcpy'ing to the
2794 buffer, which would mean that when resumed, it would clobber the
2795 trace buffer that had been set for a new run.
2796
2797 - we can't rewrite/reuse the jump pads for new tracepoints
2798 safely. Say you do tstart while a thread is stopped midway while
2799 collecting. When the thread is later resumed, it finishes the
2800 collection, and returns to the jump pad, to execute the original
2801 instruction that was under the tracepoint jump at the time the
2802 older run had been started. If the jump pad had been rewritten
2803 since for something else in the new run, the thread would now
2804 execute the wrong / random instructions. */
2805
2806static void
2807linux_stabilize_threads (void)
2808{
0bfdf32f 2809 struct thread_info *saved_thread;
d86d4aaf 2810 struct thread_info *thread_stuck;
fa593d66 2811
d86d4aaf
DE
2812 thread_stuck
2813 = (struct thread_info *) find_inferior (&all_threads,
2814 stuck_in_jump_pad_callback,
2815 NULL);
2816 if (thread_stuck != NULL)
fa593d66 2817 {
b4d51a55 2818 if (debug_threads)
87ce2a04 2819 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
d86d4aaf 2820 lwpid_of (thread_stuck));
fa593d66
PA
2821 return;
2822 }
2823
0bfdf32f 2824 saved_thread = current_thread;
fa593d66
PA
2825
2826 stabilizing_threads = 1;
2827
2828 /* Kick 'em all. */
d86d4aaf 2829 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
fa593d66
PA
2830
2831 /* Loop until all are stopped out of the jump pads. */
d86d4aaf 2832 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
fa593d66
PA
2833 {
2834 struct target_waitstatus ourstatus;
2835 struct lwp_info *lwp;
fa593d66
PA
2836 int wstat;
2837
2838 /* Note that we go through the full wait even loop. While
2839 moving threads out of jump pad, we need to be able to step
2840 over internal breakpoints and such. */
32fcada3 2841 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
fa593d66
PA
2842
2843 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2844 {
0bfdf32f 2845 lwp = get_thread_lwp (current_thread);
fa593d66
PA
2846
2847 /* Lock it. */
863d01bd 2848 lwp_suspended_inc (lwp);
fa593d66 2849
a493e3e2 2850 if (ourstatus.value.sig != GDB_SIGNAL_0
0bfdf32f 2851 || current_thread->last_resume_kind == resume_stop)
fa593d66 2852 {
2ea28649 2853 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
fa593d66
PA
2854 enqueue_one_deferred_signal (lwp, &wstat);
2855 }
2856 }
2857 }
2858
d86d4aaf 2859 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
fa593d66
PA
2860
2861 stabilizing_threads = 0;
2862
0bfdf32f 2863 current_thread = saved_thread;
fa593d66 2864
b4d51a55 2865 if (debug_threads)
fa593d66 2866 {
d86d4aaf
DE
2867 thread_stuck
2868 = (struct thread_info *) find_inferior (&all_threads,
2869 stuck_in_jump_pad_callback,
2870 NULL);
2871 if (thread_stuck != NULL)
87ce2a04 2872 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
d86d4aaf 2873 lwpid_of (thread_stuck));
fa593d66
PA
2874 }
2875}
2876
582511be
PA
2877static void async_file_mark (void);
2878
2879/* Convenience function that is called when the kernel reports an
2880 event that is not passed out to GDB. */
2881
2882static ptid_t
2883ignore_event (struct target_waitstatus *ourstatus)
2884{
2885 /* If we got an event, there may still be others, as a single
2886 SIGCHLD can indicate more than one child stopped. This forces
2887 another target_wait call. */
2888 async_file_mark ();
2889
2890 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2891 return null_ptid;
2892}
2893
0d62e5e8 2894/* Wait for process, returns status. */
da6d8c04 2895
95954743
PA
2896static ptid_t
2897linux_wait_1 (ptid_t ptid,
2898 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 2899{
e5f1222d 2900 int w;
fc7238bb 2901 struct lwp_info *event_child;
bd99dc85 2902 int options;
bd99dc85 2903 int pid;
6bf5e0ba
PA
2904 int step_over_finished;
2905 int bp_explains_trap;
2906 int maybe_internal_trap;
2907 int report_to_gdb;
219f2f23 2908 int trace_event;
c2d6af84 2909 int in_step_range;
bd99dc85 2910
87ce2a04
DE
2911 if (debug_threads)
2912 {
2913 debug_enter ();
2914 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2915 }
2916
bd99dc85
PA
2917 /* Translate generic target options into linux options. */
2918 options = __WALL;
2919 if (target_options & TARGET_WNOHANG)
2920 options |= WNOHANG;
0d62e5e8 2921
fa593d66
PA
2922 bp_explains_trap = 0;
2923 trace_event = 0;
c2d6af84 2924 in_step_range = 0;
bd99dc85
PA
2925 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2926
6bf5e0ba
PA
2927 if (ptid_equal (step_over_bkpt, null_ptid))
2928 pid = linux_wait_for_event (ptid, &w, options);
2929 else
2930 {
2931 if (debug_threads)
87ce2a04
DE
2932 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2933 target_pid_to_str (step_over_bkpt));
6bf5e0ba
PA
2934 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2935 }
2936
fa96cb38 2937 if (pid == 0)
87ce2a04 2938 {
fa96cb38
PA
2939 gdb_assert (target_options & TARGET_WNOHANG);
2940
87ce2a04
DE
2941 if (debug_threads)
2942 {
fa96cb38
PA
2943 debug_printf ("linux_wait_1 ret = null_ptid, "
2944 "TARGET_WAITKIND_IGNORE\n");
87ce2a04
DE
2945 debug_exit ();
2946 }
fa96cb38
PA
2947
2948 ourstatus->kind = TARGET_WAITKIND_IGNORE;
87ce2a04
DE
2949 return null_ptid;
2950 }
fa96cb38
PA
2951 else if (pid == -1)
2952 {
2953 if (debug_threads)
2954 {
2955 debug_printf ("linux_wait_1 ret = null_ptid, "
2956 "TARGET_WAITKIND_NO_RESUMED\n");
2957 debug_exit ();
2958 }
bd99dc85 2959
fa96cb38
PA
2960 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2961 return null_ptid;
2962 }
0d62e5e8 2963
0bfdf32f 2964 event_child = get_thread_lwp (current_thread);
0d62e5e8 2965
fa96cb38
PA
2966 /* linux_wait_for_event only returns an exit status for the last
2967 child of a process. Report it. */
2968 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 2969 {
fa96cb38 2970 if (WIFEXITED (w))
0d62e5e8 2971 {
fa96cb38
PA
2972 ourstatus->kind = TARGET_WAITKIND_EXITED;
2973 ourstatus->value.integer = WEXITSTATUS (w);
bd99dc85 2974
fa96cb38 2975 if (debug_threads)
bd99dc85 2976 {
fa96cb38
PA
2977 debug_printf ("linux_wait_1 ret = %s, exited with "
2978 "retcode %d\n",
0bfdf32f 2979 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
2980 WEXITSTATUS (w));
2981 debug_exit ();
bd99dc85 2982 }
fa96cb38
PA
2983 }
2984 else
2985 {
2986 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2987 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
5b1c542e 2988
fa96cb38
PA
2989 if (debug_threads)
2990 {
2991 debug_printf ("linux_wait_1 ret = %s, terminated with "
2992 "signal %d\n",
0bfdf32f 2993 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
2994 WTERMSIG (w));
2995 debug_exit ();
2996 }
0d62e5e8 2997 }
fa96cb38 2998
0bfdf32f 2999 return ptid_of (current_thread);
da6d8c04
DJ
3000 }
3001
8090aef2
PA
3002 /* If step-over executes a breakpoint instruction, it means a
3003 gdb/gdbserver breakpoint had been planted on top of a permanent
3004 breakpoint. The PC has been adjusted by
3005 check_stopped_by_breakpoint to point at the breakpoint address.
3006 Advance the PC manually past the breakpoint, otherwise the
3007 program would keep trapping the permanent breakpoint forever. */
3008 if (!ptid_equal (step_over_bkpt, null_ptid)
15c66dd6 3009 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
8090aef2 3010 {
dd373349
AT
3011 int increment_pc = 0;
3012 int breakpoint_kind = 0;
3013 CORE_ADDR stop_pc = event_child->stop_pc;
3014
3015 breakpoint_kind = the_target->breakpoint_kind_from_pc (&stop_pc);
3016 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
8090aef2
PA
3017
3018 if (debug_threads)
3019 {
3020 debug_printf ("step-over for %s executed software breakpoint\n",
3021 target_pid_to_str (ptid_of (current_thread)));
3022 }
3023
3024 if (increment_pc != 0)
3025 {
3026 struct regcache *regcache
3027 = get_thread_regcache (current_thread, 1);
3028
3029 event_child->stop_pc += increment_pc;
3030 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3031
3032 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
15c66dd6 3033 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8090aef2
PA
3034 }
3035 }
3036
6bf5e0ba
PA
3037 /* If this event was not handled before, and is not a SIGTRAP, we
3038 report it. SIGILL and SIGSEGV are also treated as traps in case
3039 a breakpoint is inserted at the current PC. If this target does
3040 not support internal breakpoints at all, we also report the
3041 SIGTRAP without further processing; it's of no concern to us. */
3042 maybe_internal_trap
3043 = (supports_breakpoints ()
3044 && (WSTOPSIG (w) == SIGTRAP
3045 || ((WSTOPSIG (w) == SIGILL
3046 || WSTOPSIG (w) == SIGSEGV)
3047 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3048
3049 if (maybe_internal_trap)
3050 {
3051 /* Handle anything that requires bookkeeping before deciding to
3052 report the event or continue waiting. */
3053
3054 /* First check if we can explain the SIGTRAP with an internal
3055 breakpoint, or if we should possibly report the event to GDB.
3056 Do this before anything that may remove or insert a
3057 breakpoint. */
3058 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3059
3060 /* We have a SIGTRAP, possibly a step-over dance has just
3061 finished. If so, tweak the state machine accordingly,
3062 reinsert breakpoints and delete any reinsert (software
3063 single-step) breakpoints. */
3064 step_over_finished = finish_step_over (event_child);
3065
3066 /* Now invoke the callbacks of any internal breakpoints there. */
3067 check_breakpoints (event_child->stop_pc);
3068
219f2f23
PA
3069 /* Handle tracepoint data collecting. This may overflow the
3070 trace buffer, and cause a tracing stop, removing
3071 breakpoints. */
3072 trace_event = handle_tracepoints (event_child);
3073
6bf5e0ba
PA
3074 if (bp_explains_trap)
3075 {
3076 /* If we stepped or ran into an internal breakpoint, we've
3077 already handled it. So next time we resume (from this
3078 PC), we should step over it. */
3079 if (debug_threads)
87ce2a04 3080 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 3081
8b07ae33
PA
3082 if (breakpoint_here (event_child->stop_pc))
3083 event_child->need_step_over = 1;
6bf5e0ba
PA
3084 }
3085 }
3086 else
3087 {
3088 /* We have some other signal, possibly a step-over dance was in
3089 progress, and it should be cancelled too. */
3090 step_over_finished = finish_step_over (event_child);
fa593d66
PA
3091 }
3092
3093 /* We have all the data we need. Either report the event to GDB, or
3094 resume threads and keep waiting for more. */
3095
3096 /* If we're collecting a fast tracepoint, finish the collection and
3097 move out of the jump pad before delivering a signal. See
3098 linux_stabilize_threads. */
3099
3100 if (WIFSTOPPED (w)
3101 && WSTOPSIG (w) != SIGTRAP
3102 && supports_fast_tracepoints ()
58b4daa5 3103 && agent_loaded_p ())
fa593d66
PA
3104 {
3105 if (debug_threads)
87ce2a04
DE
3106 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3107 "to defer or adjust it.\n",
0bfdf32f 3108 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
3109
3110 /* Allow debugging the jump pad itself. */
0bfdf32f 3111 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
3112 && maybe_move_out_of_jump_pad (event_child, &w))
3113 {
3114 enqueue_one_deferred_signal (event_child, &w);
3115
3116 if (debug_threads)
87ce2a04 3117 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
0bfdf32f 3118 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
3119
3120 linux_resume_one_lwp (event_child, 0, 0, NULL);
582511be
PA
3121
3122 return ignore_event (ourstatus);
fa593d66
PA
3123 }
3124 }
219f2f23 3125
fa593d66
PA
3126 if (event_child->collecting_fast_tracepoint)
3127 {
3128 if (debug_threads)
87ce2a04
DE
3129 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3130 "Check if we're already there.\n",
0bfdf32f 3131 lwpid_of (current_thread),
87ce2a04 3132 event_child->collecting_fast_tracepoint);
fa593d66
PA
3133
3134 trace_event = 1;
3135
3136 event_child->collecting_fast_tracepoint
3137 = linux_fast_tracepoint_collecting (event_child, NULL);
3138
3139 if (event_child->collecting_fast_tracepoint != 1)
3140 {
3141 /* No longer need this breakpoint. */
3142 if (event_child->exit_jump_pad_bkpt != NULL)
3143 {
3144 if (debug_threads)
87ce2a04
DE
3145 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3146 "stopping all threads momentarily.\n");
fa593d66
PA
3147
3148 /* Other running threads could hit this breakpoint.
3149 We don't handle moribund locations like GDB does,
3150 instead we always pause all threads when removing
3151 breakpoints, so that any step-over or
3152 decr_pc_after_break adjustment is always taken
3153 care of while the breakpoint is still
3154 inserted. */
3155 stop_all_lwps (1, event_child);
fa593d66
PA
3156
3157 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3158 event_child->exit_jump_pad_bkpt = NULL;
3159
3160 unstop_all_lwps (1, event_child);
3161
3162 gdb_assert (event_child->suspended >= 0);
3163 }
3164 }
3165
3166 if (event_child->collecting_fast_tracepoint == 0)
3167 {
3168 if (debug_threads)
87ce2a04
DE
3169 debug_printf ("fast tracepoint finished "
3170 "collecting successfully.\n");
fa593d66
PA
3171
3172 /* We may have a deferred signal to report. */
3173 if (dequeue_one_deferred_signal (event_child, &w))
3174 {
3175 if (debug_threads)
87ce2a04 3176 debug_printf ("dequeued one signal.\n");
fa593d66 3177 }
3c11dd79 3178 else
fa593d66 3179 {
3c11dd79 3180 if (debug_threads)
87ce2a04 3181 debug_printf ("no deferred signals.\n");
fa593d66
PA
3182
3183 if (stabilizing_threads)
3184 {
3185 ourstatus->kind = TARGET_WAITKIND_STOPPED;
a493e3e2 3186 ourstatus->value.sig = GDB_SIGNAL_0;
87ce2a04
DE
3187
3188 if (debug_threads)
3189 {
3190 debug_printf ("linux_wait_1 ret = %s, stopped "
3191 "while stabilizing threads\n",
0bfdf32f 3192 target_pid_to_str (ptid_of (current_thread)));
87ce2a04
DE
3193 debug_exit ();
3194 }
3195
0bfdf32f 3196 return ptid_of (current_thread);
fa593d66
PA
3197 }
3198 }
3199 }
6bf5e0ba
PA
3200 }
3201
e471f25b
PA
3202 /* Check whether GDB would be interested in this event. */
3203
3204 /* If GDB is not interested in this signal, don't stop other
3205 threads, and don't report it to GDB. Just resume the inferior
3206 right away. We do this for threading-related signals as well as
3207 any that GDB specifically requested we ignore. But never ignore
3208 SIGSTOP if we sent it ourselves, and do not ignore signals when
3209 stepping - they may require special handling to skip the signal
c9587f88
AT
3210 handler. Also never ignore signals that could be caused by a
3211 breakpoint. */
e471f25b
PA
3212 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3213 thread library? */
3214 if (WIFSTOPPED (w)
0bfdf32f 3215 && current_thread->last_resume_kind != resume_step
e471f25b 3216 && (
1a981360 3217#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
fe978cb0 3218 (current_process ()->priv->thread_db != NULL
e471f25b
PA
3219 && (WSTOPSIG (w) == __SIGRTMIN
3220 || WSTOPSIG (w) == __SIGRTMIN + 1))
3221 ||
3222#endif
2ea28649 3223 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 3224 && !(WSTOPSIG (w) == SIGSTOP
c9587f88
AT
3225 && current_thread->last_resume_kind == resume_stop)
3226 && !linux_wstatus_maybe_breakpoint (w))))
e471f25b
PA
3227 {
3228 siginfo_t info, *info_p;
3229
3230 if (debug_threads)
87ce2a04 3231 debug_printf ("Ignored signal %d for LWP %ld.\n",
0bfdf32f 3232 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 3233
0bfdf32f 3234 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 3235 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
3236 info_p = &info;
3237 else
3238 info_p = NULL;
863d01bd
PA
3239
3240 if (step_over_finished)
3241 {
3242 /* We cancelled this thread's step-over above. We still
3243 need to unsuspend all other LWPs, and set them back
3244 running again while the signal handler runs. */
3245 unsuspend_all_lwps (event_child);
3246
3247 /* Enqueue the pending signal info so that proceed_all_lwps
3248 doesn't lose it. */
3249 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3250
3251 proceed_all_lwps ();
3252 }
3253 else
3254 {
3255 linux_resume_one_lwp (event_child, event_child->stepping,
3256 WSTOPSIG (w), info_p);
3257 }
582511be 3258 return ignore_event (ourstatus);
e471f25b
PA
3259 }
3260
c2d6af84
PA
3261 /* Note that all addresses are always "out of the step range" when
3262 there's no range to begin with. */
3263 in_step_range = lwp_in_step_range (event_child);
3264
3265 /* If GDB wanted this thread to single step, and the thread is out
3266 of the step range, we always want to report the SIGTRAP, and let
3267 GDB handle it. Watchpoints should always be reported. So should
3268 signals we can't explain. A SIGTRAP we can't explain could be a
3269 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3270 do, we're be able to handle GDB breakpoints on top of internal
3271 breakpoints, by handling the internal breakpoint and still
3272 reporting the event to GDB. If we don't, we're out of luck, GDB
863d01bd
PA
3273 won't see the breakpoint hit. If we see a single-step event but
3274 the thread should be continuing, don't pass the trap to gdb.
3275 That indicates that we had previously finished a single-step but
3276 left the single-step pending -- see
3277 complete_ongoing_step_over. */
6bf5e0ba 3278 report_to_gdb = (!maybe_internal_trap
0bfdf32f 3279 || (current_thread->last_resume_kind == resume_step
c2d6af84 3280 && !in_step_range)
15c66dd6 3281 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
863d01bd
PA
3282 || (!in_step_range
3283 && !bp_explains_trap
3284 && !trace_event
3285 && !step_over_finished
3286 && !(current_thread->last_resume_kind == resume_continue
3287 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
9f3a5c85 3288 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5 3289 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
de0d863e 3290 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
00db26fa 3291 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
d3ce09f5
SS
3292
3293 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
3294
3295 /* We found no reason GDB would want us to stop. We either hit one
3296 of our own breakpoints, or finished an internal step GDB
3297 shouldn't know about. */
3298 if (!report_to_gdb)
3299 {
3300 if (debug_threads)
3301 {
3302 if (bp_explains_trap)
87ce2a04 3303 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 3304 if (step_over_finished)
87ce2a04 3305 debug_printf ("Step-over finished.\n");
219f2f23 3306 if (trace_event)
87ce2a04 3307 debug_printf ("Tracepoint event.\n");
c2d6af84 3308 if (lwp_in_step_range (event_child))
87ce2a04
DE
3309 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3310 paddress (event_child->stop_pc),
3311 paddress (event_child->step_range_start),
3312 paddress (event_child->step_range_end));
6bf5e0ba
PA
3313 }
3314
3315 /* We're not reporting this breakpoint to GDB, so apply the
3316 decr_pc_after_break adjustment to the inferior's regcache
3317 ourselves. */
3318
3319 if (the_low_target.set_pc != NULL)
3320 {
3321 struct regcache *regcache
0bfdf32f 3322 = get_thread_regcache (current_thread, 1);
6bf5e0ba
PA
3323 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3324 }
3325
7984d532
PA
3326 /* We may have finished stepping over a breakpoint. If so,
3327 we've stopped and suspended all LWPs momentarily except the
3328 stepping one. This is where we resume them all again. We're
3329 going to keep waiting, so use proceed, which handles stepping
3330 over the next breakpoint. */
6bf5e0ba 3331 if (debug_threads)
87ce2a04 3332 debug_printf ("proceeding all threads.\n");
7984d532
PA
3333
3334 if (step_over_finished)
3335 unsuspend_all_lwps (event_child);
3336
6bf5e0ba 3337 proceed_all_lwps ();
582511be 3338 return ignore_event (ourstatus);
6bf5e0ba
PA
3339 }
3340
3341 if (debug_threads)
3342 {
00db26fa 3343 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
ad071a30
PA
3344 {
3345 char *str;
3346
3347 str = target_waitstatus_to_string (&event_child->waitstatus);
3348 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3349 lwpid_of (get_lwp_thread (event_child)), str);
3350 xfree (str);
3351 }
0bfdf32f 3352 if (current_thread->last_resume_kind == resume_step)
c2d6af84
PA
3353 {
3354 if (event_child->step_range_start == event_child->step_range_end)
87ce2a04 3355 debug_printf ("GDB wanted to single-step, reporting event.\n");
c2d6af84 3356 else if (!lwp_in_step_range (event_child))
87ce2a04 3357 debug_printf ("Out of step range, reporting event.\n");
c2d6af84 3358 }
15c66dd6 3359 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
87ce2a04 3360 debug_printf ("Stopped by watchpoint.\n");
582511be 3361 else if (gdb_breakpoint_here (event_child->stop_pc))
87ce2a04 3362 debug_printf ("Stopped by GDB breakpoint.\n");
6bf5e0ba 3363 if (debug_threads)
87ce2a04 3364 debug_printf ("Hit a non-gdbserver trap event.\n");
6bf5e0ba
PA
3365 }
3366
3367 /* Alright, we're going to report a stop. */
3368
582511be 3369 if (!stabilizing_threads)
6bf5e0ba
PA
3370 {
3371 /* In all-stop, stop all threads. */
582511be
PA
3372 if (!non_stop)
3373 stop_all_lwps (0, NULL);
6bf5e0ba
PA
3374
3375 /* If we're not waiting for a specific LWP, choose an event LWP
3376 from among those that have had events. Giving equal priority
3377 to all LWPs that have had events helps prevent
3378 starvation. */
3379 if (ptid_equal (ptid, minus_one_ptid))
3380 {
3381 event_child->status_pending_p = 1;
3382 event_child->status_pending = w;
3383
3384 select_event_lwp (&event_child);
3385
0bfdf32f
GB
3386 /* current_thread and event_child must stay in sync. */
3387 current_thread = get_lwp_thread (event_child);
ee1e2d4f 3388
6bf5e0ba
PA
3389 event_child->status_pending_p = 0;
3390 w = event_child->status_pending;
3391 }
3392
c03e6ccc 3393 if (step_over_finished)
582511be
PA
3394 {
3395 if (!non_stop)
3396 {
3397 /* If we were doing a step-over, all other threads but
3398 the stepping one had been paused in start_step_over,
3399 with their suspend counts incremented. We don't want
3400 to do a full unstop/unpause, because we're in
3401 all-stop mode (so we want threads stopped), but we
3402 still need to unsuspend the other threads, to
3403 decrement their `suspended' count back. */
3404 unsuspend_all_lwps (event_child);
3405 }
3406 else
3407 {
3408 /* If we just finished a step-over, then all threads had
3409 been momentarily paused. In all-stop, that's fine,
3410 we want threads stopped by now anyway. In non-stop,
3411 we need to re-resume threads that GDB wanted to be
3412 running. */
3413 unstop_all_lwps (1, event_child);
3414 }
3415 }
c03e6ccc 3416
fa593d66 3417 /* Stabilize threads (move out of jump pads). */
582511be
PA
3418 if (!non_stop)
3419 stabilize_threads ();
6bf5e0ba
PA
3420 }
3421 else
3422 {
3423 /* If we just finished a step-over, then all threads had been
3424 momentarily paused. In all-stop, that's fine, we want
3425 threads stopped by now anyway. In non-stop, we need to
3426 re-resume threads that GDB wanted to be running. */
3427 if (step_over_finished)
7984d532 3428 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
3429 }
3430
00db26fa 3431 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
de0d863e 3432 {
00db26fa
PA
3433 /* If the reported event is an exit, fork, vfork or exec, let
3434 GDB know. */
3435 *ourstatus = event_child->waitstatus;
de0d863e
DB
3436 /* Clear the event lwp's waitstatus since we handled it already. */
3437 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3438 }
3439 else
3440 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 3441
582511be 3442 /* Now that we've selected our final event LWP, un-adjust its PC if
3e572f71
PA
3443 it was a software breakpoint, and the client doesn't know we can
3444 adjust the breakpoint ourselves. */
3445 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3446 && !swbreak_feature)
582511be
PA
3447 {
3448 int decr_pc = the_low_target.decr_pc_after_break;
3449
3450 if (decr_pc != 0)
3451 {
3452 struct regcache *regcache
3453 = get_thread_regcache (current_thread, 1);
3454 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3455 }
3456 }
3457
0bfdf32f 3458 if (current_thread->last_resume_kind == resume_stop
8336d594 3459 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
3460 {
3461 /* A thread that has been requested to stop by GDB with vCont;t,
3462 and it stopped cleanly, so report as SIG0. The use of
3463 SIGSTOP is an implementation detail. */
a493e3e2 3464 ourstatus->value.sig = GDB_SIGNAL_0;
bd99dc85 3465 }
0bfdf32f 3466 else if (current_thread->last_resume_kind == resume_stop
8336d594 3467 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
3468 {
3469 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 3470 but, it stopped for other reasons. */
2ea28649 3471 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85 3472 }
de0d863e 3473 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
bd99dc85 3474 {
2ea28649 3475 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85
PA
3476 }
3477
d50171e4
PA
3478 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3479
bd99dc85 3480 if (debug_threads)
87ce2a04
DE
3481 {
3482 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
0bfdf32f 3483 target_pid_to_str (ptid_of (current_thread)),
87ce2a04
DE
3484 ourstatus->kind, ourstatus->value.sig);
3485 debug_exit ();
3486 }
bd99dc85 3487
0bfdf32f 3488 return ptid_of (current_thread);
bd99dc85
PA
3489}
3490
3491/* Get rid of any pending event in the pipe. */
3492static void
3493async_file_flush (void)
3494{
3495 int ret;
3496 char buf;
3497
3498 do
3499 ret = read (linux_event_pipe[0], &buf, 1);
3500 while (ret >= 0 || (ret == -1 && errno == EINTR));
3501}
3502
3503/* Put something in the pipe, so the event loop wakes up. */
3504static void
3505async_file_mark (void)
3506{
3507 int ret;
3508
3509 async_file_flush ();
3510
3511 do
3512 ret = write (linux_event_pipe[1], "+", 1);
3513 while (ret == 0 || (ret == -1 && errno == EINTR));
3514
3515 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3516 be awakened anyway. */
3517}
3518
95954743
PA
3519static ptid_t
3520linux_wait (ptid_t ptid,
3521 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 3522{
95954743 3523 ptid_t event_ptid;
bd99dc85 3524
bd99dc85
PA
3525 /* Flush the async file first. */
3526 if (target_is_async_p ())
3527 async_file_flush ();
3528
582511be
PA
3529 do
3530 {
3531 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3532 }
3533 while ((target_options & TARGET_WNOHANG) == 0
3534 && ptid_equal (event_ptid, null_ptid)
3535 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3536
3537 /* If at least one stop was reported, there may be more. A single
3538 SIGCHLD can signal more than one child stop. */
3539 if (target_is_async_p ()
3540 && (target_options & TARGET_WNOHANG) != 0
95954743 3541 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
3542 async_file_mark ();
3543
3544 return event_ptid;
da6d8c04
DJ
3545}
3546
/* Send signal SIGNO to the single LWP LWPID.

   Use tkill, if possible, in case we are using nptl threads.  If tkill
   fails, then we are not using nptl threads and we should be using kill.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
#ifdef __NR_tkill
  {
    static int tkill_failed;

    if (!tkill_failed)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return ret;

	/* The kernel lacks tkill; don't try the syscall again.  */
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
3574
/* Request LWP to stop by queueing a SIGSTOP for it.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3580
0d62e5e8 3581static void
02fc4de7 3582send_sigstop (struct lwp_info *lwp)
0d62e5e8 3583{
bd99dc85 3584 int pid;
0d62e5e8 3585
d86d4aaf 3586 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3587
0d62e5e8
DJ
3588 /* If we already have a pending stop signal for this process, don't
3589 send another. */
54a0b537 3590 if (lwp->stop_expected)
0d62e5e8 3591 {
ae13219e 3592 if (debug_threads)
87ce2a04 3593 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3594
0d62e5e8
DJ
3595 return;
3596 }
3597
3598 if (debug_threads)
87ce2a04 3599 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3600
d50171e4 3601 lwp->stop_expected = 1;
bd99dc85 3602 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3603}
3604
7984d532
PA
3605static int
3606send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7 3607{
d86d4aaf
DE
3608 struct thread_info *thread = (struct thread_info *) entry;
3609 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3610
7984d532
PA
3611 /* Ignore EXCEPT. */
3612 if (lwp == except)
3613 return 0;
3614
02fc4de7 3615 if (lwp->stopped)
7984d532 3616 return 0;
02fc4de7
PA
3617
3618 send_sigstop (lwp);
7984d532
PA
3619 return 0;
3620}
3621
/* Increment the suspend count of an LWP, and stop it, if not stopped
   yet.  */
static int
suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
				   void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return 0;

  lwp_suspended_inc (lwp);

  return send_sigstop_callback (entry, except);
}
3639
95954743
PA
3640static void
3641mark_lwp_dead (struct lwp_info *lwp, int wstat)
3642{
95954743
PA
3643 /* Store the exit status for later. */
3644 lwp->status_pending_p = 1;
3645 lwp->status_pending = wstat;
3646
00db26fa
PA
3647 /* Store in waitstatus as well, as there's nothing else to process
3648 for this event. */
3649 if (WIFEXITED (wstat))
3650 {
3651 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3652 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3653 }
3654 else if (WIFSIGNALED (wstat))
3655 {
3656 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3657 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3658 }
3659
95954743
PA
3660 /* Prevent trying to stop it. */
3661 lwp->stopped = 1;
3662
3663 /* No further stops are expected from a dead lwp. */
3664 lwp->stop_expected = 0;
3665}
3666
00db26fa
PA
3667/* Return true if LWP has exited already, and has a pending exit event
3668 to report to GDB. */
3669
3670static int
3671lwp_is_marked_dead (struct lwp_info *lwp)
3672{
3673 return (lwp->status_pending_p
3674 && (WIFEXITED (lwp->status_pending)
3675 || WIFSIGNALED (lwp->status_pending)));
3676}
3677
fa96cb38
PA
3678/* Wait for all children to stop for the SIGSTOPs we just queued. */
3679
0d62e5e8 3680static void
fa96cb38 3681wait_for_sigstop (void)
0d62e5e8 3682{
0bfdf32f 3683 struct thread_info *saved_thread;
95954743 3684 ptid_t saved_tid;
fa96cb38
PA
3685 int wstat;
3686 int ret;
0d62e5e8 3687
0bfdf32f
GB
3688 saved_thread = current_thread;
3689 if (saved_thread != NULL)
3690 saved_tid = saved_thread->entry.id;
bd99dc85 3691 else
95954743 3692 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 3693
d50171e4 3694 if (debug_threads)
fa96cb38 3695 debug_printf ("wait_for_sigstop: pulling events\n");
d50171e4 3696
fa96cb38
PA
3697 /* Passing NULL_PTID as filter indicates we want all events to be
3698 left pending. Eventually this returns when there are no
3699 unwaited-for children left. */
3700 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3701 &wstat, __WALL);
3702 gdb_assert (ret == -1);
0d62e5e8 3703
0bfdf32f
GB
3704 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3705 current_thread = saved_thread;
0d62e5e8
DJ
3706 else
3707 {
3708 if (debug_threads)
87ce2a04 3709 debug_printf ("Previously current thread died.\n");
0d62e5e8 3710
f0db101d
PA
3711 /* We can't change the current inferior behind GDB's back,
3712 otherwise, a subsequent command may apply to the wrong
3713 process. */
3714 current_thread = NULL;
0d62e5e8
DJ
3715 }
3716}
3717
fa593d66
PA
3718/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3719 move it out, because we need to report the stop event to GDB. For
3720 example, if the user puts a breakpoint in the jump pad, it's
3721 because she wants to debug it. */
3722
3723static int
3724stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3725{
d86d4aaf
DE
3726 struct thread_info *thread = (struct thread_info *) entry;
3727 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3728
863d01bd
PA
3729 if (lwp->suspended != 0)
3730 {
3731 internal_error (__FILE__, __LINE__,
3732 "LWP %ld is suspended, suspended=%d\n",
3733 lwpid_of (thread), lwp->suspended);
3734 }
fa593d66
PA
3735 gdb_assert (lwp->stopped);
3736
3737 /* Allow debugging the jump pad, gdb_collect, etc.. */
3738 return (supports_fast_tracepoints ()
58b4daa5 3739 && agent_loaded_p ()
fa593d66 3740 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3741 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3742 || thread->last_resume_kind == resume_step)
3743 && linux_fast_tracepoint_collecting (lwp, NULL));
3744}
3745
3746static void
3747move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3748{
d86d4aaf 3749 struct thread_info *thread = (struct thread_info *) entry;
f0ce0d3a 3750 struct thread_info *saved_thread;
d86d4aaf 3751 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3752 int *wstat;
3753
863d01bd
PA
3754 if (lwp->suspended != 0)
3755 {
3756 internal_error (__FILE__, __LINE__,
3757 "LWP %ld is suspended, suspended=%d\n",
3758 lwpid_of (thread), lwp->suspended);
3759 }
fa593d66
PA
3760 gdb_assert (lwp->stopped);
3761
f0ce0d3a
PA
3762 /* For gdb_breakpoint_here. */
3763 saved_thread = current_thread;
3764 current_thread = thread;
3765
fa593d66
PA
3766 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3767
3768 /* Allow debugging the jump pad, gdb_collect, etc. */
3769 if (!gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3770 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3771 && thread->last_resume_kind != resume_step
3772 && maybe_move_out_of_jump_pad (lwp, wstat))
3773 {
3774 if (debug_threads)
87ce2a04 3775 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
d86d4aaf 3776 lwpid_of (thread));
fa593d66
PA
3777
3778 if (wstat)
3779 {
3780 lwp->status_pending_p = 0;
3781 enqueue_one_deferred_signal (lwp, wstat);
3782
3783 if (debug_threads)
87ce2a04
DE
3784 debug_printf ("Signal %d for LWP %ld deferred "
3785 "(in jump pad)\n",
d86d4aaf 3786 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
3787 }
3788
3789 linux_resume_one_lwp (lwp, 0, 0, NULL);
3790 }
3791 else
863d01bd 3792 lwp_suspended_inc (lwp);
f0ce0d3a
PA
3793
3794 current_thread = saved_thread;
fa593d66
PA
3795}
3796
3797static int
3798lwp_running (struct inferior_list_entry *entry, void *data)
3799{
d86d4aaf
DE
3800 struct thread_info *thread = (struct thread_info *) entry;
3801 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3802
00db26fa 3803 if (lwp_is_marked_dead (lwp))
fa593d66
PA
3804 return 0;
3805 if (lwp->stopped)
3806 return 0;
3807 return 1;
3808}
3809
7984d532
PA
3810/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3811 If SUSPEND, then also increase the suspend count of every LWP,
3812 except EXCEPT. */
3813
0d62e5e8 3814static void
7984d532 3815stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8 3816{
bde24c0a
PA
3817 /* Should not be called recursively. */
3818 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3819
87ce2a04
DE
3820 if (debug_threads)
3821 {
3822 debug_enter ();
3823 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3824 suspend ? "stop-and-suspend" : "stop",
3825 except != NULL
d86d4aaf 3826 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
87ce2a04
DE
3827 : "none");
3828 }
3829
bde24c0a
PA
3830 stopping_threads = (suspend
3831 ? STOPPING_AND_SUSPENDING_THREADS
3832 : STOPPING_THREADS);
7984d532
PA
3833
3834 if (suspend)
d86d4aaf 3835 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
7984d532 3836 else
d86d4aaf 3837 find_inferior (&all_threads, send_sigstop_callback, except);
fa96cb38 3838 wait_for_sigstop ();
bde24c0a 3839 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04
DE
3840
3841 if (debug_threads)
3842 {
3843 debug_printf ("stop_all_lwps done, setting stopping_threads "
3844 "back to !stopping\n");
3845 debug_exit ();
3846 }
0d62e5e8
DJ
3847}
3848
863d01bd
PA
3849/* Enqueue one signal in the chain of signals which need to be
3850 delivered to this process on next resume. */
3851
3852static void
3853enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3854{
8d749320 3855 struct pending_signals *p_sig = XNEW (struct pending_signals);
863d01bd 3856
863d01bd
PA
3857 p_sig->prev = lwp->pending_signals;
3858 p_sig->signal = signal;
3859 if (info == NULL)
3860 memset (&p_sig->info, 0, sizeof (siginfo_t));
3861 else
3862 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3863 lwp->pending_signals = p_sig;
3864}
3865
23f238d3
PA
3866/* Resume execution of LWP. If STEP is nonzero, single-step it. If
3867 SIGNAL is nonzero, give it that signal. */
da6d8c04 3868
ce3a066d 3869static void
23f238d3
PA
3870linux_resume_one_lwp_throw (struct lwp_info *lwp,
3871 int step, int signal, siginfo_t *info)
da6d8c04 3872{
d86d4aaf 3873 struct thread_info *thread = get_lwp_thread (lwp);
0bfdf32f 3874 struct thread_info *saved_thread;
fa593d66 3875 int fast_tp_collecting;
c06cbd92
YQ
3876 struct process_info *proc = get_thread_process (thread);
3877
3878 /* Note that target description may not be initialised
3879 (proc->tdesc == NULL) at this point because the program hasn't
3880 stopped at the first instruction yet. It means GDBserver skips
3881 the extra traps from the wrapper program (see option --wrapper).
3882 Code in this function that requires register access should be
3883 guarded by proc->tdesc == NULL or something else. */
0d62e5e8 3884
54a0b537 3885 if (lwp->stopped == 0)
0d62e5e8
DJ
3886 return;
3887
fa593d66
PA
3888 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3889
3890 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3891
219f2f23
PA
3892 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3893 user used the "jump" command, or "set $pc = foo"). */
c06cbd92 3894 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
219f2f23
PA
3895 {
3896 /* Collecting 'while-stepping' actions doesn't make sense
3897 anymore. */
d86d4aaf 3898 release_while_stepping_state_list (thread);
219f2f23
PA
3899 }
3900
0d62e5e8
DJ
3901 /* If we have pending signals or status, and a new signal, enqueue the
3902 signal. Also enqueue the signal if we are waiting to reinsert a
3903 breakpoint; it will be picked up again below. */
3904 if (signal != 0
fa593d66
PA
3905 && (lwp->status_pending_p
3906 || lwp->pending_signals != NULL
3907 || lwp->bp_reinsert != 0
3908 || fast_tp_collecting))
0d62e5e8 3909 {
8d749320
SM
3910 struct pending_signals *p_sig = XNEW (struct pending_signals);
3911
54a0b537 3912 p_sig->prev = lwp->pending_signals;
0d62e5e8 3913 p_sig->signal = signal;
32ca6d61
DJ
3914 if (info == NULL)
3915 memset (&p_sig->info, 0, sizeof (siginfo_t));
3916 else
3917 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 3918 lwp->pending_signals = p_sig;
0d62e5e8
DJ
3919 }
3920
d50171e4
PA
3921 if (lwp->status_pending_p)
3922 {
3923 if (debug_threads)
87ce2a04
DE
3924 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3925 " has pending status\n",
d86d4aaf 3926 lwpid_of (thread), step ? "step" : "continue", signal,
87ce2a04 3927 lwp->stop_expected ? "expected" : "not expected");
d50171e4
PA
3928 return;
3929 }
0d62e5e8 3930
0bfdf32f
GB
3931 saved_thread = current_thread;
3932 current_thread = thread;
0d62e5e8
DJ
3933
3934 if (debug_threads)
87ce2a04 3935 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
d86d4aaf 3936 lwpid_of (thread), step ? "step" : "continue", signal,
87ce2a04 3937 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
3938
3939 /* This bit needs some thinking about. If we get a signal that
3940 we must report while a single-step reinsert is still pending,
3941 we often end up resuming the thread. It might be better to
3942 (ew) allow a stack of pending events; then we could be sure that
3943 the reinsert happened right away and not lose any signals.
3944
3945 Making this stack would also shrink the window in which breakpoints are
54a0b537 3946 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
3947 complete correctness, so it won't solve that problem. It may be
3948 worthwhile just to solve this one, however. */
54a0b537 3949 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
3950 {
3951 if (debug_threads)
87ce2a04
DE
3952 debug_printf (" pending reinsert at 0x%s\n",
3953 paddress (lwp->bp_reinsert));
d50171e4 3954
85e00e85 3955 if (can_hardware_single_step ())
d50171e4 3956 {
fa593d66
PA
3957 if (fast_tp_collecting == 0)
3958 {
3959 if (step == 0)
3960 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3961 if (lwp->suspended)
3962 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3963 lwp->suspended);
3964 }
d50171e4
PA
3965
3966 step = 1;
3967 }
0d62e5e8
DJ
3968
3969 /* Postpone any pending signal. It was enqueued above. */
3970 signal = 0;
3971 }
3972
fa593d66
PA
3973 if (fast_tp_collecting == 1)
3974 {
3975 if (debug_threads)
87ce2a04
DE
3976 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3977 " (exit-jump-pad-bkpt)\n",
d86d4aaf 3978 lwpid_of (thread));
fa593d66
PA
3979
3980 /* Postpone any pending signal. It was enqueued above. */
3981 signal = 0;
3982 }
3983 else if (fast_tp_collecting == 2)
3984 {
3985 if (debug_threads)
87ce2a04
DE
3986 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3987 " single-stepping\n",
d86d4aaf 3988 lwpid_of (thread));
fa593d66
PA
3989
3990 if (can_hardware_single_step ())
3991 step = 1;
3992 else
38e08fca
GB
3993 {
3994 internal_error (__FILE__, __LINE__,
3995 "moving out of jump pad single-stepping"
3996 " not implemented on this target");
3997 }
fa593d66
PA
3998
3999 /* Postpone any pending signal. It was enqueued above. */
4000 signal = 0;
4001 }
4002
219f2f23
PA
4003 /* If we have while-stepping actions in this thread set it stepping.
4004 If we have a signal to deliver, it may or may not be set to
4005 SIG_IGN, we don't know. Assume so, and allow collecting
4006 while-stepping into a signal handler. A possible smart thing to
4007 do would be to set an internal breakpoint at the signal return
4008 address, continue, and carry on catching this while-stepping
4009 action only when that breakpoint is hit. A future
4010 enhancement. */
d86d4aaf 4011 if (thread->while_stepping != NULL
219f2f23
PA
4012 && can_hardware_single_step ())
4013 {
4014 if (debug_threads)
87ce2a04 4015 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
d86d4aaf 4016 lwpid_of (thread));
219f2f23
PA
4017 step = 1;
4018 }
4019
c06cbd92 4020 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
0d62e5e8 4021 {
0bfdf32f 4022 struct regcache *regcache = get_thread_regcache (current_thread, 1);
582511be
PA
4023
4024 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4025
4026 if (debug_threads)
4027 {
4028 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4029 (long) lwp->stop_pc);
4030 }
0d62e5e8
DJ
4031 }
4032
fa593d66
PA
4033 /* If we have pending signals, consume one unless we are trying to
4034 reinsert a breakpoint or we're trying to finish a fast tracepoint
4035 collect. */
4036 if (lwp->pending_signals != NULL
4037 && lwp->bp_reinsert == 0
4038 && fast_tp_collecting == 0)
0d62e5e8
DJ
4039 {
4040 struct pending_signals **p_sig;
4041
54a0b537 4042 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
4043 while ((*p_sig)->prev != NULL)
4044 p_sig = &(*p_sig)->prev;
4045
4046 signal = (*p_sig)->signal;
32ca6d61 4047 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 4048 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 4049 &(*p_sig)->info);
32ca6d61 4050
0d62e5e8
DJ
4051 free (*p_sig);
4052 *p_sig = NULL;
4053 }
4054
aa5ca48f
DE
4055 if (the_low_target.prepare_to_resume != NULL)
4056 the_low_target.prepare_to_resume (lwp);
4057
d86d4aaf 4058 regcache_invalidate_thread (thread);
da6d8c04 4059 errno = 0;
54a0b537 4060 lwp->stepping = step;
d86d4aaf 4061 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
b8e1b30e 4062 (PTRACE_TYPE_ARG3) 0,
14ce3065
DE
4063 /* Coerce to a uintptr_t first to avoid potential gcc warning
4064 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 4065 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
0d62e5e8 4066
0bfdf32f 4067 current_thread = saved_thread;
da6d8c04 4068 if (errno)
23f238d3
PA
4069 perror_with_name ("resuming thread");
4070
4071 /* Successfully resumed. Clear state that no longer makes sense,
4072 and mark the LWP as running. Must not do this before resuming
4073 otherwise if that fails other code will be confused. E.g., we'd
4074 later try to stop the LWP and hang forever waiting for a stop
4075 status. Note that we must not throw after this is cleared,
4076 otherwise handle_zombie_lwp_error would get confused. */
4077 lwp->stopped = 0;
4078 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4079}
4080
4081/* Called when we try to resume a stopped LWP and that errors out. If
4082 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4083 or about to become), discard the error, clear any pending status
4084 the LWP may have, and return true (we'll collect the exit status
4085 soon enough). Otherwise, return false. */
4086
4087static int
4088check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4089{
4090 struct thread_info *thread = get_lwp_thread (lp);
4091
4092 /* If we get an error after resuming the LWP successfully, we'd
4093 confuse !T state for the LWP being gone. */
4094 gdb_assert (lp->stopped);
4095
4096 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4097 because even if ptrace failed with ESRCH, the tracee may be "not
4098 yet fully dead", but already refusing ptrace requests. In that
4099 case the tracee has 'R (Running)' state for a little bit
4100 (observed in Linux 3.18). See also the note on ESRCH in the
4101 ptrace(2) man page. Instead, check whether the LWP has any state
4102 other than ptrace-stopped. */
4103
4104 /* Don't assume anything if /proc/PID/status can't be read. */
4105 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 4106 {
23f238d3
PA
4107 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4108 lp->status_pending_p = 0;
4109 return 1;
4110 }
4111 return 0;
4112}
4113
4114/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4115 disappears while we try to resume it. */
3221518c 4116
23f238d3
PA
4117static void
4118linux_resume_one_lwp (struct lwp_info *lwp,
4119 int step, int signal, siginfo_t *info)
4120{
4121 TRY
4122 {
4123 linux_resume_one_lwp_throw (lwp, step, signal, info);
4124 }
4125 CATCH (ex, RETURN_MASK_ERROR)
4126 {
4127 if (!check_ptrace_stopped_lwp_gone (lwp))
4128 throw_exception (ex);
3221518c 4129 }
23f238d3 4130 END_CATCH
da6d8c04
DJ
4131}
4132
/* Bundle of GDB resume requests, handed by linux_resume to the
   per-thread find_inferior callbacks.  */

struct thread_resume_array
{
  /* Array of resume requests received from GDB.  */
  struct thread_resume *resume;
  /* Number of elements in RESUME.  */
  size_t n;
};
64386c31 4138
ebcf782c
DE
4139/* This function is called once per thread via find_inferior.
4140 ARG is a pointer to a thread_resume_array struct.
4141 We look up the thread specified by ENTRY in ARG, and mark the thread
4142 with a pointer to the appropriate resume request.
5544ad89
DJ
4143
4144 This algorithm is O(threads * resume elements), but resume elements
4145 is small (and will remain small at least until GDB supports thread
4146 suspension). */
ebcf782c 4147
2bd7c093
PA
4148static int
4149linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 4150{
d86d4aaf
DE
4151 struct thread_info *thread = (struct thread_info *) entry;
4152 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4153 int ndx;
2bd7c093 4154 struct thread_resume_array *r;
64386c31 4155
9a3c8263 4156 r = (struct thread_resume_array *) arg;
64386c31 4157
2bd7c093 4158 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
4159 {
4160 ptid_t ptid = r->resume[ndx].thread;
4161 if (ptid_equal (ptid, minus_one_ptid)
4162 || ptid_equal (ptid, entry->id)
0c9070b3
YQ
4163 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4164 of PID'. */
d86d4aaf 4165 || (ptid_get_pid (ptid) == pid_of (thread)
0c9070b3
YQ
4166 && (ptid_is_pid (ptid)
4167 || ptid_get_lwp (ptid) == -1)))
95954743 4168 {
d50171e4 4169 if (r->resume[ndx].kind == resume_stop
8336d594 4170 && thread->last_resume_kind == resume_stop)
d50171e4
PA
4171 {
4172 if (debug_threads)
87ce2a04
DE
4173 debug_printf ("already %s LWP %ld at GDB's request\n",
4174 (thread->last_status.kind
4175 == TARGET_WAITKIND_STOPPED)
4176 ? "stopped"
4177 : "stopping",
d86d4aaf 4178 lwpid_of (thread));
d50171e4
PA
4179
4180 continue;
4181 }
4182
95954743 4183 lwp->resume = &r->resume[ndx];
8336d594 4184 thread->last_resume_kind = lwp->resume->kind;
fa593d66 4185
c2d6af84
PA
4186 lwp->step_range_start = lwp->resume->step_range_start;
4187 lwp->step_range_end = lwp->resume->step_range_end;
4188
fa593d66
PA
4189 /* If we had a deferred signal to report, dequeue one now.
4190 This can happen if LWP gets more than one signal while
4191 trying to get out of a jump pad. */
4192 if (lwp->stopped
4193 && !lwp->status_pending_p
4194 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4195 {
4196 lwp->status_pending_p = 1;
4197
4198 if (debug_threads)
87ce2a04
DE
4199 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4200 "leaving status pending.\n",
d86d4aaf
DE
4201 WSTOPSIG (lwp->status_pending),
4202 lwpid_of (thread));
fa593d66
PA
4203 }
4204
95954743
PA
4205 return 0;
4206 }
4207 }
2bd7c093
PA
4208
4209 /* No resume action for this thread. */
4210 lwp->resume = NULL;
64386c31 4211
2bd7c093 4212 return 0;
5544ad89
DJ
4213}
4214
20ad9378
DE
4215/* find_inferior callback for linux_resume.
4216 Set *FLAG_P if this lwp has an interesting status pending. */
5544ad89 4217
bd99dc85
PA
4218static int
4219resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 4220{
d86d4aaf
DE
4221 struct thread_info *thread = (struct thread_info *) entry;
4222 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4223
bd99dc85
PA
4224 /* LWPs which will not be resumed are not interesting, because
4225 we might not wait for them next time through linux_wait. */
2bd7c093 4226 if (lwp->resume == NULL)
bd99dc85 4227 return 0;
64386c31 4228
582511be 4229 if (thread_still_has_status_pending_p (thread))
d50171e4
PA
4230 * (int *) flag_p = 1;
4231
4232 return 0;
4233}
4234
4235/* Return 1 if this lwp that GDB wants running is stopped at an
4236 internal breakpoint that we need to step over. It assumes that any
4237 required STOP_PC adjustment has already been propagated to the
4238 inferior's regcache. */
4239
4240static int
4241need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4242{
d86d4aaf
DE
4243 struct thread_info *thread = (struct thread_info *) entry;
4244 struct lwp_info *lwp = get_thread_lwp (thread);
0bfdf32f 4245 struct thread_info *saved_thread;
d50171e4 4246 CORE_ADDR pc;
c06cbd92
YQ
4247 struct process_info *proc = get_thread_process (thread);
4248
4249 /* GDBserver is skipping the extra traps from the wrapper program,
4250 don't have to do step over. */
4251 if (proc->tdesc == NULL)
4252 return 0;
d50171e4
PA
4253
4254 /* LWPs which will not be resumed are not interesting, because we
4255 might not wait for them next time through linux_wait. */
4256
4257 if (!lwp->stopped)
4258 {
4259 if (debug_threads)
87ce2a04 4260 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
d86d4aaf 4261 lwpid_of (thread));
d50171e4
PA
4262 return 0;
4263 }
4264
8336d594 4265 if (thread->last_resume_kind == resume_stop)
d50171e4
PA
4266 {
4267 if (debug_threads)
87ce2a04
DE
4268 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4269 " stopped\n",
d86d4aaf 4270 lwpid_of (thread));
d50171e4
PA
4271 return 0;
4272 }
4273
7984d532
PA
4274 gdb_assert (lwp->suspended >= 0);
4275
4276 if (lwp->suspended)
4277 {
4278 if (debug_threads)
87ce2a04 4279 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
d86d4aaf 4280 lwpid_of (thread));
7984d532
PA
4281 return 0;
4282 }
4283
d50171e4
PA
4284 if (!lwp->need_step_over)
4285 {
4286 if (debug_threads)
d86d4aaf 4287 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
d50171e4 4288 }
5544ad89 4289
bd99dc85 4290 if (lwp->status_pending_p)
d50171e4
PA
4291 {
4292 if (debug_threads)
87ce2a04
DE
4293 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4294 " status.\n",
d86d4aaf 4295 lwpid_of (thread));
d50171e4
PA
4296 return 0;
4297 }
4298
4299 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4300 or we have. */
4301 pc = get_pc (lwp);
4302
4303 /* If the PC has changed since we stopped, then don't do anything,
4304 and let the breakpoint/tracepoint be hit. This happens if, for
4305 instance, GDB handled the decr_pc_after_break subtraction itself,
4306 GDB is OOL stepping this thread, or the user has issued a "jump"
4307 command, or poked thread's registers herself. */
4308 if (pc != lwp->stop_pc)
4309 {
4310 if (debug_threads)
87ce2a04
DE
4311 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4312 "Old stop_pc was 0x%s, PC is now 0x%s\n",
d86d4aaf
DE
4313 lwpid_of (thread),
4314 paddress (lwp->stop_pc), paddress (pc));
d50171e4
PA
4315
4316 lwp->need_step_over = 0;
4317 return 0;
4318 }
4319
0bfdf32f
GB
4320 saved_thread = current_thread;
4321 current_thread = thread;
d50171e4 4322
8b07ae33 4323 /* We can only step over breakpoints we know about. */
fa593d66 4324 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
d50171e4 4325 {
8b07ae33 4326 /* Don't step over a breakpoint that GDB expects to hit
9f3a5c85
LM
4327 though. If the condition is being evaluated on the target's side
4328 and it evaluate to false, step over this breakpoint as well. */
4329 if (gdb_breakpoint_here (pc)
d3ce09f5
SS
4330 && gdb_condition_true_at_breakpoint (pc)
4331 && gdb_no_commands_at_breakpoint (pc))
8b07ae33
PA
4332 {
4333 if (debug_threads)
87ce2a04
DE
4334 debug_printf ("Need step over [LWP %ld]? yes, but found"
4335 " GDB breakpoint at 0x%s; skipping step over\n",
d86d4aaf 4336 lwpid_of (thread), paddress (pc));
d50171e4 4337
0bfdf32f 4338 current_thread = saved_thread;
8b07ae33
PA
4339 return 0;
4340 }
4341 else
4342 {
4343 if (debug_threads)
87ce2a04
DE
4344 debug_printf ("Need step over [LWP %ld]? yes, "
4345 "found breakpoint at 0x%s\n",
d86d4aaf 4346 lwpid_of (thread), paddress (pc));
d50171e4 4347
8b07ae33
PA
4348 /* We've found an lwp that needs stepping over --- return 1 so
4349 that find_inferior stops looking. */
0bfdf32f 4350 current_thread = saved_thread;
8b07ae33
PA
4351
4352 /* If the step over is cancelled, this is set again. */
4353 lwp->need_step_over = 0;
4354 return 1;
4355 }
d50171e4
PA
4356 }
4357
0bfdf32f 4358 current_thread = saved_thread;
d50171e4
PA
4359
4360 if (debug_threads)
87ce2a04
DE
4361 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4362 " at 0x%s\n",
d86d4aaf 4363 lwpid_of (thread), paddress (pc));
c6ecbae5 4364
bd99dc85 4365 return 0;
5544ad89
DJ
4366}
4367
d50171e4
PA
4368/* Start a step-over operation on LWP. When LWP stopped at a
4369 breakpoint, to make progress, we need to remove the breakpoint out
4370 of the way. If we let other threads run while we do that, they may
4371 pass by the breakpoint location and miss hitting it. To avoid
4372 that, a step-over momentarily stops all threads while LWP is
4373 single-stepped while the breakpoint is temporarily uninserted from
4374 the inferior. When the single-step finishes, we reinsert the
4375 breakpoint, and let all threads that are supposed to be running,
4376 run again.
4377
4378 On targets that don't support hardware single-step, we don't
4379 currently support full software single-stepping. Instead, we only
4380 support stepping over the thread event breakpoint, by asking the
4381 low target where to place a reinsert breakpoint. Since this
4382 routine assumes the breakpoint being stepped over is a thread event
4383 breakpoint, it usually assumes the return address of the current
4384 function is a good enough place to set the reinsert breakpoint. */
4385
4386static int
4387start_step_over (struct lwp_info *lwp)
4388{
d86d4aaf 4389 struct thread_info *thread = get_lwp_thread (lwp);
0bfdf32f 4390 struct thread_info *saved_thread;
d50171e4
PA
4391 CORE_ADDR pc;
4392 int step;
4393
4394 if (debug_threads)
87ce2a04 4395 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
d86d4aaf 4396 lwpid_of (thread));
d50171e4 4397
7984d532 4398 stop_all_lwps (1, lwp);
863d01bd
PA
4399
4400 if (lwp->suspended != 0)
4401 {
4402 internal_error (__FILE__, __LINE__,
4403 "LWP %ld suspended=%d\n", lwpid_of (thread),
4404 lwp->suspended);
4405 }
d50171e4
PA
4406
4407 if (debug_threads)
87ce2a04 4408 debug_printf ("Done stopping all threads for step-over.\n");
d50171e4
PA
4409
4410 /* Note, we should always reach here with an already adjusted PC,
4411 either by GDB (if we're resuming due to GDB's request), or by our
4412 caller, if we just finished handling an internal breakpoint GDB
4413 shouldn't care about. */
4414 pc = get_pc (lwp);
4415
0bfdf32f
GB
4416 saved_thread = current_thread;
4417 current_thread = thread;
d50171e4
PA
4418
4419 lwp->bp_reinsert = pc;
4420 uninsert_breakpoints_at (pc);
fa593d66 4421 uninsert_fast_tracepoint_jumps_at (pc);
d50171e4
PA
4422
4423 if (can_hardware_single_step ())
4424 {
4425 step = 1;
4426 }
4427 else
4428 {
4429 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4430 set_reinsert_breakpoint (raddr);
4431 step = 0;
4432 }
4433
0bfdf32f 4434 current_thread = saved_thread;
d50171e4
PA
4435
4436 linux_resume_one_lwp (lwp, step, 0, NULL);
4437
4438 /* Require next event from this LWP. */
d86d4aaf 4439 step_over_bkpt = thread->entry.id;
d50171e4
PA
4440 return 1;
4441}
4442
4443/* Finish a step-over. Reinsert the breakpoint we had uninserted in
4444 start_step_over, if still there, and delete any reinsert
4445 breakpoints we've set, on non hardware single-step targets. */
4446
4447static int
4448finish_step_over (struct lwp_info *lwp)
4449{
4450 if (lwp->bp_reinsert != 0)
4451 {
4452 if (debug_threads)
87ce2a04 4453 debug_printf ("Finished step over.\n");
d50171e4
PA
4454
4455 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4456 may be no breakpoint to reinsert there by now. */
4457 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 4458 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
4459
4460 lwp->bp_reinsert = 0;
4461
4462 /* Delete any software-single-step reinsert breakpoints. No
4463 longer needed. We don't have to worry about other threads
4464 hitting this trap, and later not being able to explain it,
4465 because we were stepping over a breakpoint, and we hold all
4466 threads but LWP stopped while doing that. */
4467 if (!can_hardware_single_step ())
4468 delete_reinsert_breakpoints ();
4469
4470 step_over_bkpt = null_ptid;
4471 return 1;
4472 }
4473 else
4474 return 0;
4475}
4476
863d01bd
PA
4477/* If there's a step over in progress, wait until all threads stop
4478 (that is, until the stepping thread finishes its step), and
4479 unsuspend all lwps. The stepping thread ends with its status
4480 pending, which is processed later when we get back to processing
4481 events. */
4482
4483static void
4484complete_ongoing_step_over (void)
4485{
4486 if (!ptid_equal (step_over_bkpt, null_ptid))
4487 {
4488 struct lwp_info *lwp;
4489 int wstat;
4490 int ret;
4491
4492 if (debug_threads)
4493 debug_printf ("detach: step over in progress, finish it first\n");
4494
4495 /* Passing NULL_PTID as filter indicates we want all events to
4496 be left pending. Eventually this returns when there are no
4497 unwaited-for children left. */
4498 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4499 &wstat, __WALL);
4500 gdb_assert (ret == -1);
4501
4502 lwp = find_lwp_pid (step_over_bkpt);
4503 if (lwp != NULL)
4504 finish_step_over (lwp);
4505 step_over_bkpt = null_ptid;
4506 unsuspend_all_lwps (lwp);
4507 }
4508}
4509
5544ad89
DJ
4510/* This function is called once per thread. We check the thread's resume
4511 request, which will tell us whether to resume, step, or leave the thread
bd99dc85 4512 stopped; and what signal, if any, it should be sent.
5544ad89 4513
bd99dc85
PA
4514 For threads which we aren't explicitly told otherwise, we preserve
4515 the stepping flag; this is used for stepping over gdbserver-placed
4516 breakpoints.
4517
4518 If pending_flags was set in any thread, we queue any needed
4519 signals, since we won't actually resume. We already have a pending
4520 event to report, so we don't need to preserve any step requests;
4521 they should be re-issued if necessary. */
4522
4523static int
4524linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
5544ad89 4525{
d86d4aaf
DE
4526 struct thread_info *thread = (struct thread_info *) entry;
4527 struct lwp_info *lwp = get_thread_lwp (thread);
bd99dc85 4528 int step;
d50171e4
PA
4529 int leave_all_stopped = * (int *) arg;
4530 int leave_pending;
5544ad89 4531
2bd7c093 4532 if (lwp->resume == NULL)
bd99dc85 4533 return 0;
5544ad89 4534
bd99dc85 4535 if (lwp->resume->kind == resume_stop)
5544ad89 4536 {
bd99dc85 4537 if (debug_threads)
d86d4aaf 4538 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
bd99dc85
PA
4539
4540 if (!lwp->stopped)
4541 {
4542 if (debug_threads)
d86d4aaf 4543 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
bd99dc85 4544
d50171e4
PA
4545 /* Stop the thread, and wait for the event asynchronously,
4546 through the event loop. */
02fc4de7 4547 send_sigstop (lwp);
bd99dc85
PA
4548 }
4549 else
4550 {
4551 if (debug_threads)
87ce2a04 4552 debug_printf ("already stopped LWP %ld\n",
d86d4aaf 4553 lwpid_of (thread));
d50171e4
PA
4554
4555 /* The LWP may have been stopped in an internal event that
4556 was not meant to be notified back to GDB (e.g., gdbserver
4557 breakpoint), so we should be reporting a stop event in
4558 this case too. */
4559
4560 /* If the thread already has a pending SIGSTOP, this is a
4561 no-op. Otherwise, something later will presumably resume
4562 the thread and this will cause it to cancel any pending
4563 operation, due to last_resume_kind == resume_stop. If
4564 the thread already has a pending status to report, we
4565 will still report it the next time we wait - see
4566 status_pending_p_callback. */
1a981360
PA
4567
4568 /* If we already have a pending signal to report, then
4569 there's no need to queue a SIGSTOP, as this means we're
4570 midway through moving the LWP out of the jumppad, and we
4571 will report the pending signal as soon as that is
4572 finished. */
4573 if (lwp->pending_signals_to_report == NULL)
4574 send_sigstop (lwp);
bd99dc85 4575 }
32ca6d61 4576
bd99dc85
PA
4577 /* For stop requests, we're done. */
4578 lwp->resume = NULL;
fc7238bb 4579 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 4580 return 0;
5544ad89
DJ
4581 }
4582
bd99dc85 4583 /* If this thread which is about to be resumed has a pending status,
863d01bd
PA
4584 then don't resume it - we can just report the pending status.
4585 Likewise if it is suspended, because e.g., another thread is
4586 stepping past a breakpoint. Make sure to queue any signals that
4587 would otherwise be sent. In all-stop mode, we do this decision
4588 based on if *any* thread has a pending status. If there's a
4589 thread that needs the step-over-breakpoint dance, then don't
4590 resume any other thread but that particular one. */
4591 leave_pending = (lwp->suspended
4592 || lwp->status_pending_p
4593 || leave_all_stopped);
5544ad89 4594
d50171e4 4595 if (!leave_pending)
bd99dc85
PA
4596 {
4597 if (debug_threads)
d86d4aaf 4598 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5544ad89 4599
d50171e4 4600 step = (lwp->resume->kind == resume_step);
2acc282a 4601 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
bd99dc85
PA
4602 }
4603 else
4604 {
4605 if (debug_threads)
d86d4aaf 4606 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5544ad89 4607
bd99dc85
PA
4608 /* If we have a new signal, enqueue the signal. */
4609 if (lwp->resume->sig != 0)
4610 {
8d749320
SM
4611 struct pending_signals *p_sig = XCNEW (struct pending_signals);
4612
bd99dc85
PA
4613 p_sig->prev = lwp->pending_signals;
4614 p_sig->signal = lwp->resume->sig;
bd99dc85
PA
4615
4616 /* If this is the same signal we were previously stopped by,
4617 make sure to queue its siginfo. We can ignore the return
4618 value of ptrace; if it fails, we'll skip
4619 PTRACE_SETSIGINFO. */
4620 if (WIFSTOPPED (lwp->last_status)
4621 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
d86d4aaf 4622 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 4623 &p_sig->info);
bd99dc85
PA
4624
4625 lwp->pending_signals = p_sig;
4626 }
4627 }
5544ad89 4628
fc7238bb 4629 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 4630 lwp->resume = NULL;
5544ad89 4631 return 0;
0d62e5e8
DJ
4632}
4633
4634static void
2bd7c093 4635linux_resume (struct thread_resume *resume_info, size_t n)
0d62e5e8 4636{
2bd7c093 4637 struct thread_resume_array array = { resume_info, n };
d86d4aaf 4638 struct thread_info *need_step_over = NULL;
d50171e4
PA
4639 int any_pending;
4640 int leave_all_stopped;
c6ecbae5 4641
87ce2a04
DE
4642 if (debug_threads)
4643 {
4644 debug_enter ();
4645 debug_printf ("linux_resume:\n");
4646 }
4647
2bd7c093 4648 find_inferior (&all_threads, linux_set_resume_request, &array);
5544ad89 4649
d50171e4
PA
4650 /* If there is a thread which would otherwise be resumed, which has
4651 a pending status, then don't resume any threads - we can just
4652 report the pending status. Make sure to queue any signals that
4653 would otherwise be sent. In non-stop mode, we'll apply this
4654 logic to each thread individually. We consume all pending events
4655 before considering to start a step-over (in all-stop). */
4656 any_pending = 0;
bd99dc85 4657 if (!non_stop)
d86d4aaf 4658 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
d50171e4
PA
4659
4660 /* If there is a thread which would otherwise be resumed, which is
4661 stopped at a breakpoint that needs stepping over, then don't
4662 resume any threads - have it step over the breakpoint with all
4663 other threads stopped, then resume all threads again. Make sure
4664 to queue any signals that would otherwise be delivered or
4665 queued. */
4666 if (!any_pending && supports_breakpoints ())
4667 need_step_over
d86d4aaf
DE
4668 = (struct thread_info *) find_inferior (&all_threads,
4669 need_step_over_p, NULL);
d50171e4
PA
4670
4671 leave_all_stopped = (need_step_over != NULL || any_pending);
4672
4673 if (debug_threads)
4674 {
4675 if (need_step_over != NULL)
87ce2a04 4676 debug_printf ("Not resuming all, need step over\n");
d50171e4 4677 else if (any_pending)
87ce2a04
DE
4678 debug_printf ("Not resuming, all-stop and found "
4679 "an LWP with pending status\n");
d50171e4 4680 else
87ce2a04 4681 debug_printf ("Resuming, no pending status or step over needed\n");
d50171e4
PA
4682 }
4683
4684 /* Even if we're leaving threads stopped, queue all signals we'd
4685 otherwise deliver. */
4686 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4687
4688 if (need_step_over)
d86d4aaf 4689 start_step_over (get_thread_lwp (need_step_over));
87ce2a04
DE
4690
4691 if (debug_threads)
4692 {
4693 debug_printf ("linux_resume done\n");
4694 debug_exit ();
4695 }
d50171e4
PA
4696}
4697
4698/* This function is called once per thread. We check the thread's
4699 last resume request, which will tell us whether to resume, step, or
4700 leave the thread stopped. Any signal the client requested to be
4701 delivered has already been enqueued at this point.
4702
4703 If any thread that GDB wants running is stopped at an internal
4704 breakpoint that needs stepping over, we start a step-over operation
4705 on that particular thread, and leave all others stopped. */
4706
7984d532
PA
4707static int
4708proceed_one_lwp (struct inferior_list_entry *entry, void *except)
d50171e4 4709{
d86d4aaf
DE
4710 struct thread_info *thread = (struct thread_info *) entry;
4711 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4
PA
4712 int step;
4713
7984d532
PA
4714 if (lwp == except)
4715 return 0;
d50171e4
PA
4716
4717 if (debug_threads)
d86d4aaf 4718 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
d50171e4
PA
4719
4720 if (!lwp->stopped)
4721 {
4722 if (debug_threads)
d86d4aaf 4723 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
7984d532 4724 return 0;
d50171e4
PA
4725 }
4726
02fc4de7
PA
4727 if (thread->last_resume_kind == resume_stop
4728 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4
PA
4729 {
4730 if (debug_threads)
87ce2a04 4731 debug_printf (" client wants LWP to remain %ld stopped\n",
d86d4aaf 4732 lwpid_of (thread));
7984d532 4733 return 0;
d50171e4
PA
4734 }
4735
4736 if (lwp->status_pending_p)
4737 {
4738 if (debug_threads)
87ce2a04 4739 debug_printf (" LWP %ld has pending status, leaving stopped\n",
d86d4aaf 4740 lwpid_of (thread));
7984d532 4741 return 0;
d50171e4
PA
4742 }
4743
7984d532
PA
4744 gdb_assert (lwp->suspended >= 0);
4745
d50171e4
PA
4746 if (lwp->suspended)
4747 {
4748 if (debug_threads)
d86d4aaf 4749 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
7984d532 4750 return 0;
d50171e4
PA
4751 }
4752
1a981360
PA
4753 if (thread->last_resume_kind == resume_stop
4754 && lwp->pending_signals_to_report == NULL
4755 && lwp->collecting_fast_tracepoint == 0)
02fc4de7
PA
4756 {
4757 /* We haven't reported this LWP as stopped yet (otherwise, the
4758 last_status.kind check above would catch it, and we wouldn't
4759 reach here. This LWP may have been momentarily paused by a
4760 stop_all_lwps call while handling for example, another LWP's
4761 step-over. In that case, the pending expected SIGSTOP signal
4762 that was queued at vCont;t handling time will have already
4763 been consumed by wait_for_sigstop, and so we need to requeue
4764 another one here. Note that if the LWP already has a SIGSTOP
4765 pending, this is a no-op. */
4766
4767 if (debug_threads)
87ce2a04
DE
4768 debug_printf ("Client wants LWP %ld to stop. "
4769 "Making sure it has a SIGSTOP pending\n",
d86d4aaf 4770 lwpid_of (thread));
02fc4de7
PA
4771
4772 send_sigstop (lwp);
4773 }
4774
863d01bd
PA
4775 if (thread->last_resume_kind == resume_step)
4776 {
4777 if (debug_threads)
4778 debug_printf (" stepping LWP %ld, client wants it stepping\n",
4779 lwpid_of (thread));
4780 step = 1;
4781 }
4782 else if (lwp->bp_reinsert != 0)
4783 {
4784 if (debug_threads)
4785 debug_printf (" stepping LWP %ld, reinsert set\n",
4786 lwpid_of (thread));
4787 step = 1;
4788 }
4789 else
4790 step = 0;
4791
d50171e4 4792 linux_resume_one_lwp (lwp, step, 0, NULL);
7984d532
PA
4793 return 0;
4794}
4795
/* Callback for find_inferior: drop one suspension level from the LWP
   in ENTRY (unless it is EXCEPT) and then let proceed_one_lwp decide
   whether it can run.  */

static int
unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thr);

  if (lp == except)
    return 0;

  lwp_suspended_decr (lp);

  return proceed_one_lwp (entry, except);
}
4809
4810/* When we finish a step-over, set threads running again. If there's
4811 another thread that may need a step-over, now's the time to start
4812 it. Eventually, we'll move all threads past their breakpoints. */
4813
4814static void
4815proceed_all_lwps (void)
4816{
d86d4aaf 4817 struct thread_info *need_step_over;
d50171e4
PA
4818
4819 /* If there is a thread which would otherwise be resumed, which is
4820 stopped at a breakpoint that needs stepping over, then don't
4821 resume any threads - have it step over the breakpoint with all
4822 other threads stopped, then resume all threads again. */
4823
4824 if (supports_breakpoints ())
4825 {
4826 need_step_over
d86d4aaf
DE
4827 = (struct thread_info *) find_inferior (&all_threads,
4828 need_step_over_p, NULL);
d50171e4
PA
4829
4830 if (need_step_over != NULL)
4831 {
4832 if (debug_threads)
87ce2a04
DE
4833 debug_printf ("proceed_all_lwps: found "
4834 "thread %ld needing a step-over\n",
4835 lwpid_of (need_step_over));
d50171e4 4836
d86d4aaf 4837 start_step_over (get_thread_lwp (need_step_over));
d50171e4
PA
4838 return;
4839 }
4840 }
5544ad89 4841
d50171e4 4842 if (debug_threads)
87ce2a04 4843 debug_printf ("Proceeding, no step-over needed\n");
d50171e4 4844
d86d4aaf 4845 find_inferior (&all_threads, proceed_one_lwp, NULL);
d50171e4
PA
4846}
4847
4848/* Stopped LWPs that the client wanted to be running, that don't have
4849 pending statuses, are set to run again, except for EXCEPT, if not
4850 NULL. This undoes a stop_all_lwps call. */
4851
4852static void
7984d532 4853unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 4854{
5544ad89
DJ
4855 if (debug_threads)
4856 {
87ce2a04 4857 debug_enter ();
d50171e4 4858 if (except)
87ce2a04 4859 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
d86d4aaf 4860 lwpid_of (get_lwp_thread (except)));
5544ad89 4861 else
87ce2a04 4862 debug_printf ("unstopping all lwps\n");
5544ad89
DJ
4863 }
4864
7984d532 4865 if (unsuspend)
d86d4aaf 4866 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
7984d532 4867 else
d86d4aaf 4868 find_inferior (&all_threads, proceed_one_lwp, except);
87ce2a04
DE
4869
4870 if (debug_threads)
4871 {
4872 debug_printf ("unstop_all_lwps done\n");
4873 debug_exit ();
4874 }
0d62e5e8
DJ
4875}
4876
58caa3dc
DJ
4877
4878#ifdef HAVE_LINUX_REGSETS
4879
1faeff08
MR
4880#define use_linux_regsets 1
4881
030031ee
PA
4882/* Returns true if REGSET has been disabled. */
4883
4884static int
4885regset_disabled (struct regsets_info *info, struct regset_info *regset)
4886{
4887 return (info->disabled_regsets != NULL
4888 && info->disabled_regsets[regset - info->regsets]);
4889}
4890
4891/* Disable REGSET. */
4892
/* Mark REGSET as disabled in INFO, allocating the disabled-set map
   lazily on first use.  */

static void
disable_regset (struct regsets_info *info, struct regset_info *regset)
{
  int idx = regset - info->regsets;

  if (info->disabled_regsets == NULL)
    info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
  info->disabled_regsets[idx] = 1;
}
4903
/* Fetch all regsets described by REGSETS_INFO for the current thread
   into REGCACHE.  Regsets that fail with EIO are disabled for this
   process mode; ENODATA failures are silently ignored (the regset is
   just not active).  Returns 0 if the general registers were among
   the regsets transferred, 1 if the caller must fetch them some
   other way.  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int got_general_regs = 0;
  int pid = lwpid_of (current_thread);
  struct iovec iov;

  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *regbuf, *ptrace_data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      regbuf = xmalloc (regset->size);

      /* NT_* regsets are transferred through a struct iovec;
	 old-style regsets take the buffer directly.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = regbuf;
	  iov.iov_len = regset->size;
	  ptrace_data = (void *) &iov;
	}
      else
	ptrace_data = regbuf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, ptrace_data);
#else
      res = ptrace (regset->get_request, pid, ptrace_data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    got_general_regs = 1;
	  regset->store_function (regcache, regbuf);
	}
      free (regbuf);
    }

  return got_general_regs ? 0 : 1;
}
4975
/* Write back all regsets described by REGSETS_INFO from REGCACHE to
   the current thread.  Each regset is first read from the inferior
   so that kernel fields not mirrored in the regcache are preserved,
   then overlaid with the cached values and written back.  EIO
   disables the regset for this process mode; ESRCH (process already
   gone) is silently ignored.  Returns 0 if the general registers
   were among the regsets written, 1 otherwise.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int got_general_regs = 0;
  int pid = lwpid_of (current_thread);
  struct iovec iov;

  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *regbuf, *ptrace_data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      regbuf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = regbuf;
	  iov.iov_len = regset->size;
	  ptrace_data = (void *) &iov;
	}
      else
	ptrace_data = regbuf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, ptrace_data);
#else
      res = ptrace (regset->get_request, pid, ptrace_data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, regbuf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, ptrace_data);
#else
	  res = ptrace (regset->set_request, pid, ptrace_data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (regbuf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	got_general_regs = 1;
      free (regbuf);
    }

  return got_general_regs ? 0 : 1;
}
5063
1faeff08 5064#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5065
1faeff08 5066#define use_linux_regsets 0
3aee8918
PA
5067#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5068#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5069
58caa3dc 5070#endif
1faeff08
MR
5071
5072/* Return 1 if register REGNO is supported by one of the regset ptrace
5073 calls or 0 if it has to be transferred individually. */
5074
5075static int
3aee8918 5076linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5077{
5078 unsigned char mask = 1 << (regno % 8);
5079 size_t index = regno / 8;
5080
5081 return (use_linux_regsets
3aee8918
PA
5082 && (regs_info->regset_bitmap == NULL
5083 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5084}
5085
58caa3dc 5086#ifdef HAVE_LINUX_USRREGS
1faeff08
MR
5087
5088int
3aee8918 5089register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5090{
5091 int addr;
5092
3aee8918 5093 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5094 error ("Invalid register number %d.", regnum);
5095
3aee8918 5096 addr = usrregs->regmap[regnum];
1faeff08
MR
5097
5098 return addr;
5099}
5100
5101/* Fetch one register. */
/* Fetch one register (REGNO) from the inferior with PTRACE_PEEKUSER
   and supply it to REGCACHE.  Silently does nothing for registers
   that are out of range, unfetchable, or without a ptrace address.  */

static void
fetch_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int offset, size;
  char *regbuf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  regbuf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (offset = 0; offset < size; offset += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (regbuf + offset) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, regbuf);
  else
    supply_register (regcache, regno, regbuf);
}
5144
5145/* Store one register. */
/* Store one register (REGNO) from REGCACHE into the inferior with
   PTRACE_POKEUSER.  Silently does nothing for registers that are out
   of range, unstorable, or without a ptrace address; ESRCH (process
   already gone) is ignored.  */

static void
store_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int offset, size;
  char *regbuf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  regbuf = (char *) alloca (size);
  memset (regbuf, 0, size);

  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, regbuf);
  else
    collect_register (regcache, regno, regbuf);

  pid = lwpid_of (current_thread);
  for (offset = 0; offset < size; offset += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (regbuf + offset));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
5199
5200/* Fetch all registers, or just one, from the child process.
5201 If REGNO is -1, do this for all registers, skipping any that are
5202 assumed to have been retrieved by regsets_fetch_inferior_registers,
5203 unless ALL is non-zero.
5204 Otherwise, REGNO specifies which register (so we can save time). */
5205static void
3aee8918
PA
5206usr_fetch_inferior_registers (const struct regs_info *regs_info,
5207 struct regcache *regcache, int regno, int all)
1faeff08 5208{
3aee8918
PA
5209 struct usrregs_info *usr = regs_info->usrregs;
5210
1faeff08
MR
5211 if (regno == -1)
5212 {
3aee8918
PA
5213 for (regno = 0; regno < usr->num_regs; regno++)
5214 if (all || !linux_register_in_regsets (regs_info, regno))
5215 fetch_register (usr, regcache, regno);
1faeff08
MR
5216 }
5217 else
3aee8918 5218 fetch_register (usr, regcache, regno);
1faeff08
MR
5219}
5220
5221/* Store our register values back into the inferior.
5222 If REGNO is -1, do this for all registers, skipping any that are
5223 assumed to have been saved by regsets_store_inferior_registers,
5224 unless ALL is non-zero.
5225 Otherwise, REGNO specifies which register (so we can save time). */
5226static void
3aee8918
PA
5227usr_store_inferior_registers (const struct regs_info *regs_info,
5228 struct regcache *regcache, int regno, int all)
1faeff08 5229{
3aee8918
PA
5230 struct usrregs_info *usr = regs_info->usrregs;
5231
1faeff08
MR
5232 if (regno == -1)
5233 {
3aee8918
PA
5234 for (regno = 0; regno < usr->num_regs; regno++)
5235 if (all || !linux_register_in_regsets (regs_info, regno))
5236 store_register (usr, regcache, regno);
1faeff08
MR
5237 }
5238 else
3aee8918 5239 store_register (usr, regcache, regno);
1faeff08
MR
5240}
5241
5242#else /* !HAVE_LINUX_USRREGS */
5243
3aee8918
PA
5244#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5245#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
1faeff08 5246
58caa3dc 5247#endif
1faeff08
MR
5248
5249
5250void
5251linux_fetch_registers (struct regcache *regcache, int regno)
5252{
5253 int use_regsets;
5254 int all = 0;
3aee8918 5255 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
1faeff08
MR
5256
5257 if (regno == -1)
5258 {
3aee8918
PA
5259 if (the_low_target.fetch_register != NULL
5260 && regs_info->usrregs != NULL)
5261 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
c14dfd32
PA
5262 (*the_low_target.fetch_register) (regcache, regno);
5263
3aee8918
PA
5264 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5265 if (regs_info->usrregs != NULL)
5266 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
1faeff08
MR
5267 }
5268 else
5269 {
c14dfd32
PA
5270 if (the_low_target.fetch_register != NULL
5271 && (*the_low_target.fetch_register) (regcache, regno))
5272 return;
5273
3aee8918 5274 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5275 if (use_regsets)
3aee8918
PA
5276 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5277 regcache);
5278 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5279 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5280 }
58caa3dc
DJ
5281}
5282
5283void
442ea881 5284linux_store_registers (struct regcache *regcache, int regno)
58caa3dc 5285{
1faeff08
MR
5286 int use_regsets;
5287 int all = 0;
3aee8918 5288 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
1faeff08
MR
5289
5290 if (regno == -1)
5291 {
3aee8918
PA
5292 all = regsets_store_inferior_registers (regs_info->regsets_info,
5293 regcache);
5294 if (regs_info->usrregs != NULL)
5295 usr_store_inferior_registers (regs_info, regcache, regno, all);
1faeff08
MR
5296 }
5297 else
5298 {
3aee8918 5299 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5300 if (use_regsets)
3aee8918
PA
5301 all = regsets_store_inferior_registers (regs_info->regsets_info,
5302 regcache);
5303 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5304 usr_store_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5305 }
58caa3dc
DJ
5306}
5307
da6d8c04 5308
da6d8c04
DJ
5309/* Copy LEN bytes from inferior's memory starting at MEMADDR
5310 to debugger memory starting at MYADDR. */
5311
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success or a
   positive errno value on failure.

   For reads large enough to amortize the open/close, go through
   /proc/PID/mem (one syscall via pread64 where available); fall back
   to word-by-word PTRACE_PEEKTEXT for short reads or for whatever
   tail /proc could not supply.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  PTRACE_XFER_TYPE *buffer;
  CORE_ADDR addr;
  int count;
  char filename[64];
  int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.
	 Use xsnprintf for consistency with linux_read_auxv (plain
	 sprintf has no bounds check).  */
      xsnprintf (filename, sizeof filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  /* Read all the longwords */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
5396
93ae6fdc
PA
5397/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5398 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 5399 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 5400
/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  Always succeeds if LEN is zero.

   Works in whole PTRACE_XFER_TYPE words: the partial words at either
   end are read from the inferior first so that bytes outside
   [MEMADDR, MEMADDR+LEN) are preserved.  */

static int
linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
{
  int i;
  /* Round starting address down to longword boundary.  */
  CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  int count = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
    / sizeof (PTRACE_XFER_TYPE);
  /* Allocate buffer of that many longwords.  */
  PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
  int pid = lwpid_of (current_thread);

  if (len == 0)
    {
      /* Zero length write always succeeds.  */
      return 0;
    }

  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      char str[4 * 2 + 1];
      char *p = str;
      int dump = len < 4 ? len : 4;

      for (i = 0; i < dump; i++)
	{
	  sprintf (p, "%02x", myaddr[i]);
	  p += 2;
	}
      *p = '\0';

      debug_printf ("Writing %s to 0x%08lx in process %d\n",
		    str, (long) memaddr, pid);
    }

  /* Fill start and end extra bytes of buffer with existing memory data.  */

  errno = 0;
  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
     about coercing an 8 byte integer to a 4 byte pointer.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
		      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
		      (PTRACE_TYPE_ARG4) 0);
  if (errno)
    return errno;

  if (count > 1)
    {
      errno = 0;
      buffer[count - 1]
	= ptrace (PTRACE_PEEKTEXT, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
						  * sizeof (PTRACE_XFER_TYPE)),
		  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	return errno;
    }

  /* Copy data to be written over corresponding part of buffer.  */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  myaddr, len);

  /* Write the entire buffer.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
	      (PTRACE_TYPE_ARG4) buffer[i]);
      if (errno)
	return errno;
    }

  return 0;
}
2f2893d9
DJ
5487
/* Target op: hook up libthread_db for the current process, once.  */

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process?  */
  if (proc->priv->thread_db != NULL)
    return;

  /* If the kernel supports tracing clones, then we don't need to
     use the magic thread event breakpoint to learn about
     threads.  */
  thread_db_init (!linux_supports_traceclone ());
#endif
}
5503
/* Target op: interrupt the inferior.  */

static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  */
  kill (-signal_pid, SIGINT);
}
5513
aa691b87
RM
5514/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5515 to debugger memory starting at MYADDR. */
5516
/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
   to debugger memory starting at MYADDR.  Returns the number of
   bytes read, or -1 on failure.  */

static int
linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
{
  char filename[PATH_MAX];
  int pid = lwpid_of (current_thread);
  int fd, n;

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  /* Only read if seeking to OFFSET succeeded (offset 0 needs no seek).  */
  n = -1;
  if (offset == (CORE_ADDR) 0
      || lseek (fd, (off_t) offset, SEEK_SET) == (off_t) offset)
    n = read (fd, myaddr, len);

  close (fd);

  return n;
}
5540
d993e290
PA
5541/* These breakpoint and watchpoint related wrapper functions simply
5542 pass on the function call if the target has registered a
5543 corresponding function. */
e013ee27
OF
5544
5545static int
802e8e6d
PA
5546linux_supports_z_point_type (char z_type)
5547{
5548 return (the_low_target.supports_z_point_type != NULL
5549 && the_low_target.supports_z_point_type (z_type));
5550}
5551
5552static int
5553linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5554 int size, struct raw_breakpoint *bp)
e013ee27 5555{
c8f4bfdd
YQ
5556 if (type == raw_bkpt_type_sw)
5557 return insert_memory_breakpoint (bp);
5558 else if (the_low_target.insert_point != NULL)
802e8e6d 5559 return the_low_target.insert_point (type, addr, size, bp);
e013ee27
OF
5560 else
5561 /* Unsupported (see target.h). */
5562 return 1;
5563}
5564
5565static int
802e8e6d
PA
5566linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5567 int size, struct raw_breakpoint *bp)
e013ee27 5568{
c8f4bfdd
YQ
5569 if (type == raw_bkpt_type_sw)
5570 return remove_memory_breakpoint (bp);
5571 else if (the_low_target.remove_point != NULL)
802e8e6d 5572 return the_low_target.remove_point (type, addr, size, bp);
e013ee27
OF
5573 else
5574 /* Unsupported (see target.h). */
5575 return 1;
5576}
5577
3e572f71
PA
5578/* Implement the to_stopped_by_sw_breakpoint target_ops
5579 method. */
5580
5581static int
5582linux_stopped_by_sw_breakpoint (void)
5583{
5584 struct lwp_info *lwp = get_thread_lwp (current_thread);
5585
5586 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5587}
5588
5589/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5590 method. */
5591
5592static int
5593linux_supports_stopped_by_sw_breakpoint (void)
5594{
5595 return USE_SIGTRAP_SIGINFO;
5596}
5597
5598/* Implement the to_stopped_by_hw_breakpoint target_ops
5599 method. */
5600
5601static int
5602linux_stopped_by_hw_breakpoint (void)
5603{
5604 struct lwp_info *lwp = get_thread_lwp (current_thread);
5605
5606 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5607}
5608
5609/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5610 method. */
5611
5612static int
5613linux_supports_stopped_by_hw_breakpoint (void)
5614{
5615 return USE_SIGTRAP_SIGINFO;
5616}
5617
/* Implement the supports_hardware_single_step target_ops method.  */

static int
linux_supports_hardware_single_step (void)
{
  return can_hardware_single_step ();
}
5625
e013ee27
OF
5626static int
5627linux_stopped_by_watchpoint (void)
5628{
0bfdf32f 5629 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5630
15c66dd6 5631 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5632}
5633
5634static CORE_ADDR
5635linux_stopped_data_address (void)
5636{
0bfdf32f 5637 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5638
5639 return lwp->stopped_data_address;
e013ee27
OF
5640}
5641
db0dfaa0
LM
5642#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5643 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5644 && defined(PT_TEXT_END_ADDR)
5645
5646/* This is only used for targets that define PT_TEXT_ADDR,
5647 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5648 the target has different ways of acquiring this information, like
5649 loadmaps. */
52fb6437
NS
5650
5651/* Under uClinux, programs are loaded at non-zero offsets, which we need
5652 to tell gdb about. */
5653
5654static int
5655linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5656{
52fb6437 5657 unsigned long text, text_end, data;
62828379 5658 int pid = lwpid_of (current_thread);
52fb6437
NS
5659
5660 errno = 0;
5661
b8e1b30e
LM
5662 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5663 (PTRACE_TYPE_ARG4) 0);
5664 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5665 (PTRACE_TYPE_ARG4) 0);
5666 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5667 (PTRACE_TYPE_ARG4) 0);
52fb6437
NS
5668
5669 if (errno == 0)
5670 {
5671 /* Both text and data offsets produced at compile-time (and so
1b3f6016
PA
5672 used by gdb) are relative to the beginning of the program,
5673 with the data segment immediately following the text segment.
5674 However, the actual runtime layout in memory may put the data
5675 somewhere else, so when we send gdb a data base-address, we
5676 use the real data base address and subtract the compile-time
5677 data base-address from it (which is just the length of the
5678 text segment). BSS immediately follows data in both
5679 cases. */
52fb6437
NS
5680 *text_p = text;
5681 *data_p = data - (text_end - text);
1b3f6016 5682
52fb6437
NS
5683 return 1;
5684 }
52fb6437
NS
5685 return 0;
5686}
5687#endif
5688
07e059b5
VP
5689static int
5690linux_qxfer_osdata (const char *annex,
1b3f6016
PA
5691 unsigned char *readbuf, unsigned const char *writebuf,
5692 CORE_ADDR offset, int len)
07e059b5 5693{
d26e3629 5694 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
5695}
5696
d0722149
DE
5697/* Convert a native/host siginfo object, into/from the siginfo in the
5698 layout of the inferiors' architecture. */
5699
5700static void
a5362b9a 5701siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
d0722149
DE
5702{
5703 int done = 0;
5704
5705 if (the_low_target.siginfo_fixup != NULL)
5706 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5707
5708 /* If there was no callback, or the callback didn't do anything,
5709 then just do a straight memcpy. */
5710 if (!done)
5711 {
5712 if (direction == 1)
a5362b9a 5713 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5714 else
a5362b9a 5715 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5716 }
5717}
5718
4aa995e1
PA
/* Read or write the siginfo of the current thread's LWP.  Exactly one
   of READBUF/WRITEBUF is non-NULL; OFFSET/LEN select a window into the
   (architecture-converted) siginfo image.  Returns the number of bytes
   transferred, or -1 on error.  */

static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  /* Inferior-layout image of the siginfo; same storage size, possibly
     different field layout (see siginfo_fixup).  */
  char inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  if (debug_threads)
    debug_printf ("%s siginfo for lwp %d.\n",
		  readbuf != NULL ? "Reading" : "Writing",
		  pid);

  if (offset >= sizeof (siginfo))
    return -1;

  /* Even for a write we fetch the current siginfo first, so that a
     partial write only modifies the requested window.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the siginfo image.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
5767
bd99dc85
PA
5768/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5769 so we notice when children change state; as the handler for the
5770 sigsuspend in my_waitpid. */
5771
5772static void
5773sigchld_handler (int signo)
5774{
5775 int old_errno = errno;
5776
5777 if (debug_threads)
e581f2b4
PA
5778 {
5779 do
5780 {
5781 /* fprintf is not async-signal-safe, so call write
5782 directly. */
5783 if (write (2, "sigchld_handler\n",
5784 sizeof ("sigchld_handler\n") - 1) < 0)
5785 break; /* just ignore */
5786 } while (0);
5787 }
bd99dc85
PA
5788
5789 if (target_is_async_p ())
5790 async_file_mark (); /* trigger a linux_wait */
5791
5792 errno = old_errno;
5793}
5794
/* Non-stop mode is always available on Linux.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
5800
/* Enable or disable async (non-stop) event reporting, according to
   ENABLE.  Creates/destroys the event pipe that wakes the event loop
   from the SIGCHLD handler.  Returns the previous async state so the
   caller can restore it.  */

static int
linux_async (int enable)
{
  int previous = target_is_async_p ();

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
		  enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while the pipe/handler state is inconsistent, so
	 sigchld_handler never observes a half-initialized pipe.  */
      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    {
	      /* Pipe creation failed: stay in the previous mode and
		 report it unchanged.  */
	      linux_event_pipe[0] = -1;
	      linux_event_pipe[1] = -1;
	      sigprocmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
5855
/* Switch the event-loop registration to match NONSTOP; return 0 when
   the target ended up in the requested mode, -1 otherwise.  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);

  if (target_is_async_p () != (nonstop != 0))
    return -1;

  return 0;
}
5867
cf8fd78b
PA
5868static int
5869linux_supports_multi_process (void)
5870{
5871 return 1;
5872}
5873
89245bc0
DB
/* Check if fork events are supported.  */

static int
linux_supports_fork_events (void)
{
  /* Requires PTRACE_O_TRACEFORK support in the kernel.  */
  return linux_supports_tracefork ();
}
5881
/* Check if vfork events are supported.  */

static int
linux_supports_vfork_events (void)
{
  /* Same kernel capability gates fork and vfork tracing.  */
  return linux_supports_tracefork ();
}
5889
94585166
DB
/* Check if exec events are supported.  */

static int
linux_supports_exec_events (void)
{
  return linux_supports_traceexec ();
}
5897
de0d863e
DB
5898/* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5899 options for the specified lwp. */
5900
5901static int
5902reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5903 void *args)
5904{
5905 struct thread_info *thread = (struct thread_info *) entry;
5906 struct lwp_info *lwp = get_thread_lwp (thread);
5907
5908 if (!lwp->stopped)
5909 {
5910 /* Stop the lwp so we can modify its ptrace options. */
5911 lwp->must_set_ptrace_flags = 1;
5912 linux_stop_lwp (lwp);
5913 }
5914 else
5915 {
5916 /* Already stopped; go ahead and set the ptrace options. */
5917 struct process_info *proc = find_process_pid (pid_of (thread));
5918 int options = linux_low_ptrace_options (proc->attached);
5919
5920 linux_enable_event_reporting (lwpid_of (thread), options);
5921 lwp->must_set_ptrace_flags = 0;
5922 }
5923
5924 return 0;
5925}
5926
5927/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5928 ptrace flags for all inferiors. This is in case the new GDB connection
5929 doesn't support the same set of events that the previous one did. */
5930
5931static void
5932linux_handle_new_gdb_connection (void)
5933{
5934 pid_t pid;
5935
5936 /* Request that all the lwps reset their ptrace options. */
5937 find_inferior (&all_threads, reset_lwp_ptrace_options_callback , &pid);
5938}
5939
03583c20
UW
/* ASLR can be disabled for new inferiors only when gdbserver was
   built with personality(2) support.  */

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
efcbbd14 5949
d1feda86
YQ
/* The in-process agent is always supported on Linux.  */

static int
linux_supports_agent (void)
{
  return 1;
}
5955
c2d6af84
PA
5956static int
5957linux_supports_range_stepping (void)
5958{
5959 if (*the_low_target.supports_range_stepping == NULL)
5960 return 0;
5961
5962 return (*the_low_target.supports_range_stepping) ();
5963}
5964
efcbbd14
UW
5965/* Enumerate spufs IDs for process PID. */
5966static int
5967spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5968{
5969 int pos = 0;
5970 int written = 0;
5971 char path[128];
5972 DIR *dir;
5973 struct dirent *entry;
5974
5975 sprintf (path, "/proc/%ld/fd", pid);
5976 dir = opendir (path);
5977 if (!dir)
5978 return -1;
5979
5980 rewinddir (dir);
5981 while ((entry = readdir (dir)) != NULL)
5982 {
5983 struct stat st;
5984 struct statfs stfs;
5985 int fd;
5986
5987 fd = atoi (entry->d_name);
5988 if (!fd)
5989 continue;
5990
5991 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5992 if (stat (path, &st) != 0)
5993 continue;
5994 if (!S_ISDIR (st.st_mode))
5995 continue;
5996
5997 if (statfs (path, &stfs) != 0)
5998 continue;
5999 if (stfs.f_type != SPUFS_MAGIC)
6000 continue;
6001
6002 if (pos >= offset && pos + 4 <= offset + len)
6003 {
6004 *(unsigned int *)(buf + pos - offset) = fd;
6005 written += 4;
6006 }
6007 pos += 4;
6008 }
6009
6010 closedir (dir);
6011 return written;
6012}
6013
6014/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6015 object type, using the /proc file system. */
6016static int
6017linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6018 unsigned const char *writebuf,
6019 CORE_ADDR offset, int len)
6020{
0bfdf32f 6021 long pid = lwpid_of (current_thread);
efcbbd14
UW
6022 char buf[128];
6023 int fd = 0;
6024 int ret = 0;
6025
6026 if (!writebuf && !readbuf)
6027 return -1;
6028
6029 if (!*annex)
6030 {
6031 if (!readbuf)
6032 return -1;
6033 else
6034 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6035 }
6036
6037 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6038 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
6039 if (fd <= 0)
6040 return -1;
6041
6042 if (offset != 0
6043 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6044 {
6045 close (fd);
6046 return 0;
6047 }
6048
6049 if (writebuf)
6050 ret = write (fd, writebuf, (size_t) len);
6051 else
6052 ret = read (fd, readbuf, (size_t) len);
6053
6054 close (fd);
6055 return ret;
6056}
6057
723b724b 6058#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
78d85199
YQ
/* One loaded segment, as reported by the target kernel's loadmap.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
/* Loadmap layout for DSBT targets: carries the DSBT table location in
   addition to the segment list.  */
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
/* Loadmap layout for FDPIC targets.  */
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif
78d85199 6099
78d85199
YQ
6100static int
6101linux_read_loadmap (const char *annex, CORE_ADDR offset,
6102 unsigned char *myaddr, unsigned int len)
6103{
0bfdf32f 6104 int pid = lwpid_of (current_thread);
78d85199
YQ
6105 int addr = -1;
6106 struct target_loadmap *data = NULL;
6107 unsigned int actual_length, copy_length;
6108
6109 if (strcmp (annex, "exec") == 0)
723b724b 6110 addr = (int) LINUX_LOADMAP_EXEC;
78d85199 6111 else if (strcmp (annex, "interp") == 0)
723b724b 6112 addr = (int) LINUX_LOADMAP_INTERP;
78d85199
YQ
6113 else
6114 return -1;
6115
723b724b 6116 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
78d85199
YQ
6117 return -1;
6118
6119 if (data == NULL)
6120 return -1;
6121
6122 actual_length = sizeof (struct target_loadmap)
6123 + sizeof (struct target_loadseg) * data->nsegs;
6124
6125 if (offset < 0 || offset > actual_length)
6126 return -1;
6127
6128 copy_length = actual_length - offset < len ? actual_length - offset : len;
6129 memcpy (myaddr, (char *) data + offset, copy_length);
6130 return copy_length;
6131}
723b724b
MF
6132#else
6133# define linux_read_loadmap NULL
6134#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6135
1570b33e
L
6136static void
6137linux_process_qsupported (const char *query)
6138{
6139 if (the_low_target.process_qsupported != NULL)
6140 the_low_target.process_qsupported (query);
6141}
6142
219f2f23
PA
6143static int
6144linux_supports_tracepoints (void)
6145{
6146 if (*the_low_target.supports_tracepoints == NULL)
6147 return 0;
6148
6149 return (*the_low_target.supports_tracepoints) ();
6150}
6151
6152static CORE_ADDR
6153linux_read_pc (struct regcache *regcache)
6154{
6155 if (the_low_target.get_pc == NULL)
6156 return 0;
6157
6158 return (*the_low_target.get_pc) (regcache);
6159}
6160
6161static void
6162linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6163{
6164 gdb_assert (the_low_target.set_pc != NULL);
6165
6166 (*the_low_target.set_pc) (regcache, pc);
6167}
6168
8336d594
PA
6169static int
6170linux_thread_stopped (struct thread_info *thread)
6171{
6172 return get_thread_lwp (thread)->stopped;
6173}
6174
6175/* This exposes stop-all-threads functionality to other modules. */
6176
6177static void
7984d532 6178linux_pause_all (int freeze)
8336d594 6179{
7984d532
PA
6180 stop_all_lwps (freeze, NULL);
6181}
6182
6183/* This exposes unstop-all-threads functionality to other gdbserver
6184 modules. */
6185
6186static void
6187linux_unpause_all (int unfreeze)
6188{
6189 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
6190}
6191
90d74c30
PA
6192static int
6193linux_prepare_to_access_memory (void)
6194{
6195 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6196 running LWP. */
6197 if (non_stop)
6198 linux_pause_all (1);
6199 return 0;
6200}
6201
6202static void
0146f85b 6203linux_done_accessing_memory (void)
90d74c30
PA
6204{
6205 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6206 running LWP. */
6207 if (non_stop)
6208 linux_unpause_all (1);
6209}
6210
fa593d66
PA
/* Delegate construction of a fast-tracepoint jump pad to the low
   target; pure passthrough.  See target.h for the contract of the
   in/out arguments.  */

static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, trampoline, trampoline_size,
     jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end,
     err);
}
6232
6a271cae
PA
6233static struct emit_ops *
6234linux_emit_ops (void)
6235{
6236 if (the_low_target.emit_ops != NULL)
6237 return (*the_low_target.emit_ops) ();
6238 else
6239 return NULL;
6240}
6241
405f8e94
SS
6242static int
6243linux_get_min_fast_tracepoint_insn_len (void)
6244{
6245 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6246}
6247
2268b414
JK
6248/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6249
6250static int
6251get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6252 CORE_ADDR *phdr_memaddr, int *num_phdr)
6253{
6254 char filename[PATH_MAX];
6255 int fd;
6256 const int auxv_size = is_elf64
6257 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6258 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6259
6260 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6261
6262 fd = open (filename, O_RDONLY);
6263 if (fd < 0)
6264 return 1;
6265
6266 *phdr_memaddr = 0;
6267 *num_phdr = 0;
6268 while (read (fd, buf, auxv_size) == auxv_size
6269 && (*phdr_memaddr == 0 || *num_phdr == 0))
6270 {
6271 if (is_elf64)
6272 {
6273 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6274
6275 switch (aux->a_type)
6276 {
6277 case AT_PHDR:
6278 *phdr_memaddr = aux->a_un.a_val;
6279 break;
6280 case AT_PHNUM:
6281 *num_phdr = aux->a_un.a_val;
6282 break;
6283 }
6284 }
6285 else
6286 {
6287 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6288
6289 switch (aux->a_type)
6290 {
6291 case AT_PHDR:
6292 *phdr_memaddr = aux->a_un.a_val;
6293 break;
6294 case AT_PHNUM:
6295 *num_phdr = aux->a_un.a_val;
6296 break;
6297 }
6298 }
6299 }
6300
6301 close (fd);
6302
6303 if (*phdr_memaddr == 0 || *num_phdr == 0)
6304 {
6305 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6306 "phdr_memaddr = %ld, phdr_num = %d",
6307 (long) *phdr_memaddr, *num_phdr);
6308 return 2;
6309 }
6310
6311 return 0;
6312}
6313
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not
   present.  Locates the program headers through the auxiliary vector,
   computes the load-time relocation from PT_PHDR, then returns the
   relocated PT_DYNAMIC address.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  /* Basic sanity check, also bounds the alloca below.  */
  gdb_assert (num_phdr < 100);
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  /* Second pass: find PT_DYNAMIC and apply the relocation.  */
  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
6387
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the inferior's _DYNAMIC array one entry at a time until
     DT_NULL (or a read failure) terminates it.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* Scratch buffer for reading the pointer DT_MIPS_RLD_MAP*
	     points at, without alignment assumptions.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* DT_MIPS_RLD_MAP_REL stores an offset relative to the
	     entry's own address, not an absolute address.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  /* 32-bit twin of the branch above.  */
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6489
6490/* Read one pointer from MEMADDR in the inferior. */
6491
6492static int
6493read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6494{
485f1ee4
PA
6495 int ret;
6496
6497 /* Go through a union so this works on either big or little endian
6498 hosts, when the inferior's pointer size is smaller than the size
6499 of CORE_ADDR. It is assumed the inferior's endianness is the
6500 same of the superior's. */
6501 union
6502 {
6503 CORE_ADDR core_addr;
6504 unsigned int ui;
6505 unsigned char uc;
6506 } addr;
6507
6508 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6509 if (ret == 0)
6510 {
6511 if (ptr_size == sizeof (CORE_ADDR))
6512 *ptr = addr.core_addr;
6513 else if (ptr_size == sizeof (unsigned int))
6514 *ptr = addr.ui;
6515 else
6516 gdb_assert_not_reached ("unhandled pointer size");
6517 }
6518 return ret;
2268b414
JK
6519}
6520
/* Byte offsets of the interesting fields of the inferior's
   `struct r_debug' and `struct link_map', for one pointer size.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6544
fb723180 6545/* Construct qXfer:libraries-svr4:read reply. */
2268b414
JK
6546
6547static int
6548linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6549 unsigned const char *writebuf,
6550 CORE_ADDR offset, int len)
6551{
6552 char *document;
6553 unsigned document_len;
fe978cb0 6554 struct process_info_private *const priv = current_process ()->priv;
2268b414
JK
6555 char filename[PATH_MAX];
6556 int pid, is_elf64;
6557
6558 static const struct link_map_offsets lmo_32bit_offsets =
6559 {
6560 0, /* r_version offset. */
6561 4, /* r_debug.r_map offset. */
6562 0, /* l_addr offset in link_map. */
6563 4, /* l_name offset in link_map. */
6564 8, /* l_ld offset in link_map. */
6565 12, /* l_next offset in link_map. */
6566 16 /* l_prev offset in link_map. */
6567 };
6568
6569 static const struct link_map_offsets lmo_64bit_offsets =
6570 {
6571 0, /* r_version offset. */
6572 8, /* r_debug.r_map offset. */
6573 0, /* l_addr offset in link_map. */
6574 8, /* l_name offset in link_map. */
6575 16, /* l_ld offset in link_map. */
6576 24, /* l_next offset in link_map. */
6577 32 /* l_prev offset in link_map. */
6578 };
6579 const struct link_map_offsets *lmo;
214d508e 6580 unsigned int machine;
b1fbec62
GB
6581 int ptr_size;
6582 CORE_ADDR lm_addr = 0, lm_prev = 0;
6583 int allocated = 1024;
6584 char *p;
6585 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6586 int header_done = 0;
2268b414
JK
6587
6588 if (writebuf != NULL)
6589 return -2;
6590 if (readbuf == NULL)
6591 return -1;
6592
0bfdf32f 6593 pid = lwpid_of (current_thread);
2268b414 6594 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
214d508e 6595 is_elf64 = elf_64_file_p (filename, &machine);
2268b414 6596 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
b1fbec62 6597 ptr_size = is_elf64 ? 8 : 4;
2268b414 6598
b1fbec62
GB
6599 while (annex[0] != '\0')
6600 {
6601 const char *sep;
6602 CORE_ADDR *addrp;
6603 int len;
2268b414 6604
b1fbec62
GB
6605 sep = strchr (annex, '=');
6606 if (sep == NULL)
6607 break;
0c5bf5a9 6608
b1fbec62 6609 len = sep - annex;
61012eef 6610 if (len == 5 && startswith (annex, "start"))
b1fbec62 6611 addrp = &lm_addr;
61012eef 6612 else if (len == 4 && startswith (annex, "prev"))
b1fbec62
GB
6613 addrp = &lm_prev;
6614 else
6615 {
6616 annex = strchr (sep, ';');
6617 if (annex == NULL)
6618 break;
6619 annex++;
6620 continue;
6621 }
6622
6623 annex = decode_address_to_semicolon (addrp, sep + 1);
2268b414 6624 }
b1fbec62
GB
6625
6626 if (lm_addr == 0)
2268b414 6627 {
b1fbec62
GB
6628 int r_version = 0;
6629
6630 if (priv->r_debug == 0)
6631 priv->r_debug = get_r_debug (pid, is_elf64);
6632
6633 /* We failed to find DT_DEBUG. Such situation will not change
6634 for this inferior - do not retry it. Report it to GDB as
6635 E01, see for the reasons at the GDB solib-svr4.c side. */
6636 if (priv->r_debug == (CORE_ADDR) -1)
6637 return -1;
6638
6639 if (priv->r_debug != 0)
2268b414 6640 {
b1fbec62
GB
6641 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6642 (unsigned char *) &r_version,
6643 sizeof (r_version)) != 0
6644 || r_version != 1)
6645 {
6646 warning ("unexpected r_debug version %d", r_version);
6647 }
6648 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6649 &lm_addr, ptr_size) != 0)
6650 {
6651 warning ("unable to read r_map from 0x%lx",
6652 (long) priv->r_debug + lmo->r_map_offset);
6653 }
2268b414 6654 }
b1fbec62 6655 }
2268b414 6656
224c3ddb 6657 document = (char *) xmalloc (allocated);
b1fbec62
GB
6658 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6659 p = document + strlen (document);
6660
6661 while (lm_addr
6662 && read_one_ptr (lm_addr + lmo->l_name_offset,
6663 &l_name, ptr_size) == 0
6664 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6665 &l_addr, ptr_size) == 0
6666 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6667 &l_ld, ptr_size) == 0
6668 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6669 &l_prev, ptr_size) == 0
6670 && read_one_ptr (lm_addr + lmo->l_next_offset,
6671 &l_next, ptr_size) == 0)
6672 {
6673 unsigned char libname[PATH_MAX];
6674
6675 if (lm_prev != l_prev)
2268b414 6676 {
b1fbec62
GB
6677 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6678 (long) lm_prev, (long) l_prev);
6679 break;
2268b414
JK
6680 }
6681
d878444c
JK
6682 /* Ignore the first entry even if it has valid name as the first entry
6683 corresponds to the main executable. The first entry should not be
6684 skipped if the dynamic loader was loaded late by a static executable
6685 (see solib-svr4.c parameter ignore_first). But in such case the main
6686 executable does not have PT_DYNAMIC present and this function already
6687 exited above due to failed get_r_debug. */
6688 if (lm_prev == 0)
2268b414 6689 {
d878444c
JK
6690 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6691 p = p + strlen (p);
6692 }
6693 else
6694 {
6695 /* Not checking for error because reading may stop before
6696 we've got PATH_MAX worth of characters. */
6697 libname[0] = '\0';
6698 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6699 libname[sizeof (libname) - 1] = '\0';
6700 if (libname[0] != '\0')
2268b414 6701 {
d878444c
JK
6702 /* 6x the size for xml_escape_text below. */
6703 size_t len = 6 * strlen ((char *) libname);
6704 char *name;
2268b414 6705
d878444c
JK
6706 if (!header_done)
6707 {
6708 /* Terminate `<library-list-svr4'. */
6709 *p++ = '>';
6710 header_done = 1;
6711 }
2268b414 6712
db1ff28b 6713 while (allocated < p - document + len + 200)
d878444c
JK
6714 {
6715 /* Expand to guarantee sufficient storage. */
6716 uintptr_t document_len = p - document;
2268b414 6717
224c3ddb 6718 document = (char *) xrealloc (document, 2 * allocated);
d878444c
JK
6719 allocated *= 2;
6720 p = document + document_len;
6721 }
6722
6723 name = xml_escape_text ((char *) libname);
6724 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
db1ff28b 6725 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
d878444c
JK
6726 name, (unsigned long) lm_addr,
6727 (unsigned long) l_addr, (unsigned long) l_ld);
6728 free (name);
6729 }
0afae3cf 6730 }
b1fbec62
GB
6731
6732 lm_prev = lm_addr;
6733 lm_addr = l_next;
2268b414
JK
6734 }
6735
b1fbec62
GB
6736 if (!header_done)
6737 {
6738 /* Empty list; terminate `<library-list-svr4'. */
6739 strcpy (p, "/>");
6740 }
6741 else
6742 strcpy (p, "</library-list-svr4>");
6743
2268b414
JK
6744 document_len = strlen (document);
6745 if (offset < document_len)
6746 document_len -= offset;
6747 else
6748 document_len = 0;
6749 if (len > document_len)
6750 len = document_len;
6751
6752 memcpy (readbuf, document + offset, len);
6753 xfree (document);
6754
6755 return len;
6756}
6757
9accd112
MM
6758#ifdef HAVE_LINUX_BTRACE
6759
969c39fb 6760/* See to_disable_btrace target method. */
9accd112 6761
969c39fb
MM
6762static int
6763linux_low_disable_btrace (struct btrace_target_info *tinfo)
6764{
6765 enum btrace_error err;
6766
6767 err = linux_disable_btrace (tinfo);
6768 return (err == BTRACE_ERR_NONE ? 0 : -1);
6769}
6770
b20a6524
MM
6771/* Encode an Intel(R) Processor Trace configuration. */
6772
6773static void
6774linux_low_encode_pt_config (struct buffer *buffer,
6775 const struct btrace_data_pt_config *config)
6776{
6777 buffer_grow_str (buffer, "<pt-config>\n");
6778
6779 switch (config->cpu.vendor)
6780 {
6781 case CV_INTEL:
6782 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6783 "model=\"%u\" stepping=\"%u\"/>\n",
6784 config->cpu.family, config->cpu.model,
6785 config->cpu.stepping);
6786 break;
6787
6788 default:
6789 break;
6790 }
6791
6792 buffer_grow_str (buffer, "</pt-config>\n");
6793}
6794
6795/* Encode a raw buffer. */
6796
6797static void
6798linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6799 unsigned int size)
6800{
6801 if (size == 0)
6802 return;
6803
6804 /* We use hex encoding - see common/rsp-low.h. */
6805 buffer_grow_str (buffer, "<raw>\n");
6806
6807 while (size-- > 0)
6808 {
6809 char elem[2];
6810
6811 elem[0] = tohex ((*data >> 4) & 0xf);
6812 elem[1] = tohex (*data++ & 0xf);
6813
6814 buffer_grow (buffer, elem, 2);
6815 }
6816
6817 buffer_grow_str (buffer, "</raw>\n");
6818}
6819
969c39fb
MM
6820/* See to_read_btrace target method. */
6821
6822static int
9accd112
MM
6823linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6824 int type)
6825{
734b0e4b 6826 struct btrace_data btrace;
9accd112 6827 struct btrace_block *block;
969c39fb 6828 enum btrace_error err;
9accd112
MM
6829 int i;
6830
734b0e4b
MM
6831 btrace_data_init (&btrace);
6832
969c39fb
MM
6833 err = linux_read_btrace (&btrace, tinfo, type);
6834 if (err != BTRACE_ERR_NONE)
6835 {
6836 if (err == BTRACE_ERR_OVERFLOW)
6837 buffer_grow_str0 (buffer, "E.Overflow.");
6838 else
6839 buffer_grow_str0 (buffer, "E.Generic Error.");
6840
b20a6524 6841 goto err;
969c39fb 6842 }
9accd112 6843
734b0e4b
MM
6844 switch (btrace.format)
6845 {
6846 case BTRACE_FORMAT_NONE:
6847 buffer_grow_str0 (buffer, "E.No Trace.");
b20a6524 6848 goto err;
734b0e4b
MM
6849
6850 case BTRACE_FORMAT_BTS:
6851 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6852 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
9accd112 6853
734b0e4b
MM
6854 for (i = 0;
6855 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6856 i++)
6857 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6858 paddress (block->begin), paddress (block->end));
9accd112 6859
734b0e4b
MM
6860 buffer_grow_str0 (buffer, "</btrace>\n");
6861 break;
6862
b20a6524
MM
6863 case BTRACE_FORMAT_PT:
6864 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6865 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6866 buffer_grow_str (buffer, "<pt>\n");
6867
6868 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
9accd112 6869
b20a6524
MM
6870 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6871 btrace.variant.pt.size);
6872
6873 buffer_grow_str (buffer, "</pt>\n");
6874 buffer_grow_str0 (buffer, "</btrace>\n");
6875 break;
6876
6877 default:
6878 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6879 goto err;
734b0e4b 6880 }
969c39fb 6881
734b0e4b 6882 btrace_data_fini (&btrace);
969c39fb 6883 return 0;
b20a6524
MM
6884
6885err:
6886 btrace_data_fini (&btrace);
6887 return -1;
9accd112 6888}
f4abbc16
MM
6889
6890/* See to_btrace_conf target method. */
6891
6892static int
6893linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6894 struct buffer *buffer)
6895{
6896 const struct btrace_config *conf;
6897
6898 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6899 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6900
6901 conf = linux_btrace_conf (tinfo);
6902 if (conf != NULL)
6903 {
6904 switch (conf->format)
6905 {
6906 case BTRACE_FORMAT_NONE:
6907 break;
6908
6909 case BTRACE_FORMAT_BTS:
d33501a5
MM
6910 buffer_xml_printf (buffer, "<bts");
6911 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6912 buffer_xml_printf (buffer, " />\n");
f4abbc16 6913 break;
b20a6524
MM
6914
6915 case BTRACE_FORMAT_PT:
6916 buffer_xml_printf (buffer, "<pt");
6917 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6918 buffer_xml_printf (buffer, "/>\n");
6919 break;
f4abbc16
MM
6920 }
6921 }
6922
6923 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6924 return 0;
6925}
9accd112
MM
6926#endif /* HAVE_LINUX_BTRACE */
6927
7b669087
GB
6928/* See nat/linux-nat.h. */
6929
6930ptid_t
6931current_lwp_ptid (void)
6932{
6933 return ptid_of (current_thread);
6934}
6935
dd373349
AT
6936/* Implementation of the target_ops method "breakpoint_kind_from_pc". */
6937
6938static int
6939linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
6940{
6941 if (the_low_target.breakpoint_kind_from_pc != NULL)
6942 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
6943 else
1652a986 6944 return default_breakpoint_kind_from_pc (pcptr);
dd373349
AT
6945}
6946
6947/* Implementation of the target_ops method "sw_breakpoint_from_kind". */
6948
6949static const gdb_byte *
6950linux_sw_breakpoint_from_kind (int kind, int *size)
6951{
6952 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
6953
6954 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
6955}
6956
ce3a066d
DJ
/* The Linux implementation of the target_ops vector.  Entries are
   positional and must match the slot order of struct target_ops in
   gdbserver's target.h; a NULL entry means the operation is not
   supported in this configuration.  */

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_arch_setup,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_sw_breakpoint,
  linux_supports_stopped_by_sw_breakpoint,
  linux_stopped_by_hw_breakpoint,
  linux_supports_stopped_by_hw_breakpoint,
  linux_supports_hardware_single_step,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
  /* read_offsets is only meaningful on no-MMU uClibc targets that
     expose the text/data load addresses through ptrace.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
  /* TLS address lookup requires libthread_db support.  */
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
  linux_supports_fork_events,
  linux_supports_vfork_events,
  linux_supports_exec_events,
  linux_handle_new_gdb_connection,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  /* NOTE(review): this NULL slot appears to be a target method not
     applicable to Linux (presumably get_tib_address, which is
     Windows-specific) -- confirm against target.h slot order.  */
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
  /* Branch tracing is only available when built against the Linux
     btrace support; otherwise all five btrace slots are NULL.  */
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
  linux_low_btrace_conf,
#else
  NULL,
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
  linux_proc_pid_to_exec_file,
  linux_mntns_open_cloexec,
  linux_mntns_unlink,
  linux_mntns_readlink,
  linux_breakpoint_kind_from_pc,
  linux_sw_breakpoint_from_kind
};
7054
0d62e5e8
DJ
/* Set up gdbserver's initial signal disposition: ignore the
   LinuxThreads cancellation signal so it does not kill gdbserver.  */

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
#endif
}
7064
3aee8918
PA
#ifdef HAVE_LINUX_REGSETS
/* Compute INFO->num_regsets by scanning INFO->regsets up to its
   terminating sentinel (an entry with a negative size).  */

void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
7075
da6d8c04
DJ
/* One-time initialization of the Linux low target: install the target
   vector, set up signal handling, and initialize the architecture
   backend.  Called once at gdbserver startup.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);

  linux_init_signals ();
  linux_ptrace_init_warnings ();

  /* Install the SIGCHLD handler so we are notified of child status
     changes.  SA_RESTART keeps interrupted syscalls transparent.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  /* Probe which ptrace features the running kernel supports.  */
  linux_check_ptrace_features ();
}
This page took 1.501196 seconds and 4 git commands to generate.