2012-01-17 Pedro Alves <palves@redhat.com>
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-low.c
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "linux-osdata.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

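/* Illustrative note, not part of the original source: W_STOPCODE
   builds a wait status the same way the kernel encodes a stopped
   child.  For example, W_STOPCODE (SIGSTOP) on x86 GNU/Linux is
   (19 << 8) | 0x7f == 0x137f: WIFSTOPPED() keys off the low 0x7f
   byte and WSTOPSIG() recovers 19.  handle_extended_wait below uses
   it to synthesize a SIGSTOP status for a new clone child.  */
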
/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some other
   process will presumably claim each of these as forked children
   momentarily.  */

struct inferior_list stopped_pids;

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);
  if (readlink (name1, name2, MAXPATHLEN) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}

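/* Illustrative note, not part of the original source: for pid 1234
   this returns the target of the "/proc/1234/exe" symlink, e.g.
   "/usr/bin/foo".  If readlink fails, the "/proc/1234/exe" path
   itself is returned instead; it can typically still be opened to
   read the executable image.  */
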
/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
          && header->e_ident[EI_MAG1] == ELFMAG1
          && header->e_ident[EI_MAG2] == ELFMAG2
          && header->e_ident[EI_MAG3] == ELFMAG3
          && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}

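/* Illustrative usage, not part of the original source: deciding
   whether the inferior is a 64-bit process from its executable.
   The helper name is hypothetical; it only combines the two
   functions defined above.  */
#if 0
static int
example_inferior_is_elf64 (int pid)
{
  char *file = linux_child_pid_to_exec_file (pid);
  int is_64 = elf_64_file_p (file);	/* 1, 0, or -1 on error.  */

  free (file);
  return is_64;
}
#endif
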
static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}

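/* Illustrative walk-through, not part of the original source, of the
   __WALL emulation above for a blocking caller (no WNOHANG):

     1. waitpid (pid, ..., WNOHANG) polls for non-clone children.
     2. Nothing found: toggle __WCLONE and poll clone children.
     3. Still nothing after both flavors: sigsuspend () until some
        signal (e.g. SIGCHLD) arrives, then repeat from step 1.

   Blocking all signals up front and unblocking them only inside
   sigsuspend closes the race where SIGCHLD could be delivered
   between a failed waitpid and the suspend.  */
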
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

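/* Illustrative note, not part of the original source: with
   PTRACE_O_TRACECLONE in effect, a clone stop is reported as the
   wait status

     0x7f | (SIGTRAP << 8) | (PTRACE_EVENT_CLONE << 16) == 0x03057f

   so WIFSTOPPED() and WSTOPSIG() == SIGTRAP still hold, and the
   `wstat >> 16' above recovers PTRACE_EVENT_CLONE (3).  The new
   thread's LWP id then comes from PTRACE_GETEVENTMSG.  */
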
/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

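/* Worked example, not part of the original source, for i386 where
   decr_pc_after_break is 1: a breakpoint at 0x08048000 is an int3
   (0xcc) byte.  Hitting it under PTRACE_CONT leaves $eip at
   0x08048001, one past the breakpoint, so get_stop_pc reports
   0x08048000.  After PTRACE_SINGLESTEP the PC already points at the
   correct next instruction, hence the !lwp->stepping guard.  */
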
0d62e5e8 505static void *
95954743 506add_lwp (ptid_t ptid)
611cb4a5 507{
54a0b537 508 struct lwp_info *lwp;
0d62e5e8 509
54a0b537
PA
510 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
511 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 512
95954743 513 lwp->head.id = ptid;
0d62e5e8 514
aa5ca48f
DE
515 if (the_low_target.new_thread != NULL)
516 lwp->arch_private = the_low_target.new_thread ();
517
54a0b537 518 add_inferior_to_list (&all_lwps, &lwp->head);
0d62e5e8 519
54a0b537 520 return lwp;
0d62e5e8 521}
611cb4a5 522
/* Start an inferior process and returns its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning ("Error disabling address space randomization: %s",
                 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            /* Errors ignored.  */;
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning ("Error restoring address space randomization: %s",
                 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

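/* Illustrative note, not part of the original source: calling
   personality (0xffffffff) is the conventional way to query the
   current persona without changing it, which is why the code above
   can both read and later verify the ADDR_NO_RANDOMIZE bit with
   that same call.  */
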
/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }
      else
        /* If we fail to attach to a process, report an error.  */
        error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
               strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
        {
          fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
          fflush (stderr);
        }
      else
        {
          /* At this point we attached to the tgid.  Scan the task for
             existing threads.  */
          unsigned long lwp;
          int new_threads_found;
          int iterations = 0;
          struct dirent *dp;

          while (iterations < 2)
            {
              new_threads_found = 0;
              /* Add all the other threads.  While we go through the
                 threads, new threads may be spawned.  Cycle through
                 the list of threads until we have done two iterations without
                 finding new threads.  */
              while ((dp = readdir (dir)) != NULL)
                {
                  /* Fetch one lwp.  */
                  lwp = strtoul (dp->d_name, NULL, 10);

                  /* Is this a new thread?  */
                  if (lwp
                      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
                    {
                      linux_attach_lwp_1 (lwp, 0);
                      new_threads_found++;

                      if (debug_threads)
                        fprintf (stderr, "\
Found and attached to new lwp %ld\n", lwp);
                    }
                }

              if (!new_threads_found)
                iterations++;
              else
                iterations = 0;

              rewinddir (dir);
            }
          closedir (dir);
        }
    }

  return 0;
}

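/* Illustration, not part of the original source: for a tgid 1234
   with two extra threads, /proc/1234/task contains entries "1234",
   "1235" and "1236".  readdir also yields "." and "..", which
   strtoul converts to 0 and the `if (lwp && ...)' test above
   discards.  The scan only stops after two consecutive passes that
   attach nothing new, since attaching can race with the process
   spawning more threads.  */
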
struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid , 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill the inferior lwp.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      /* Don't dereference LWP here; it is NULL.  */
      if (debug_threads)
        fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n",
                 pid);
    }
  else
    {
      if (debug_threads)
        fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
                 lwpid_of (lwp), pid);

      do
        {
          ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

          /* Make sure it died.  The loop is most likely unnecessary.  */
          lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
        } while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info *) find_inferior (&all_lwps, same_lwp, &ptid);
}

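/* Illustrative note, not part of the original source: find_lwp_pid
   accepts either a full ptid or a bare pid.  Both
   ptid_build (1234, 1235, 0) and pid_to_ptid (1235) locate the LWP
   whose kernel tid is 1235, because same_lwp falls back to the pid
   field when the ptid carries no lwp id.  */
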
static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  /* Don't log stops with signals 32 and 33; those are used
     internally by the old LinuxThreads library and would flood the
     debug output.  */
  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && in_process_agent_loaded ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
                 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
                         lwpid_of (lwp));
              current_inferior = saved_inferior;

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
                }

              regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                fprintf (stderr,
                         "Cancelling fast exit-jump-pad: removing bkpt. "
                         "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);
              cancel_breakpoints ();

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
             lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        fprintf (stderr,
                 "   Already queued %d\n",
                 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        {
          if (sig->signal == WSTOPSIG (*wstat))
            {
              if (debug_threads)
                fprintf (stderr,
                         "Not requeuing already queued non-RT signal %d"
                         " for LWP %ld\n",
                         sig->signal,
                         lwpid_of (lwp));
              return;
            }
        }
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
        p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
        ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
        fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
                 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
        {
          struct pending_signals *sig;

          for (sig = lwp->pending_signals_to_report;
               sig != NULL;
               sig = sig->prev)
            fprintf (stderr,
                     "   Still queued %d\n",
                     sig->signal);

          fprintf (stderr, "   (no more queued signals)\n");
        }

      return 1;
    }

  return 0;
}

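/* Illustrative note, not part of the original source: the deferred
   list is singly linked through `prev' with the newest entry at the
   head, so enqueueing is O(1) while dequeueing walks to the tail
   and removes the oldest entry.  Deferred signals are therefore
   re-reported in the order in which they were originally deferred
   (FIFO).  */
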
/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: Push back breakpoint for %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (current_inferior, 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: No breakpoint found at %s for [%s]\n",
                 paddress (lwp->stop_pc),
                 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

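/* Illustrative note, not part of the original source: after a
   successful cancel_breakpoint the SIGTRAP is discarded and the
   LWP's PC is back on the breakpoint address, so when the thread is
   eventually resumed it re-executes the breakpoint instruction and
   traps again -- this time against whatever breakpoints the user
   still has inserted.  */
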
/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      event_child = (struct lwp_info *)
        find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
        fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (!stopping_threads
          && requested_child->status_pending_p
          && requested_child->collecting_fast_tracepoint)
        {
          enqueue_one_deferred_signal (requested_child,
                                       &requested_child->status_pending);
          requested_child->status_pending_p = 0;
          requested_child->status_pending = 0;
          linux_resume_one_lwp (requested_child, 0, 0, NULL);
        }

      if (requested_child->suspended
          && requested_child->status_pending_p)
        fatal ("requesting an event out of a suspended child?");

      if (requested_child->status_pending_p)
        event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
                 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
        {
          if (debug_threads)
            fprintf (stderr, "WNOHANG set, no event found\n");
          return 0;
        }

      if (event_child == NULL)
        error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
        {
          if (debug_threads)
            fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

          /* If the last thread is exiting, just return.  */
          if (last_thread_of_process_p (current_inferior))
            {
              if (debug_threads)
                fprintf (stderr, "LWP %ld is last lwp of process\n",
                         lwpid_of (event_child));
              return lwpid_of (event_child);
            }

          if (!non_stop)
            {
              current_inferior = (struct thread_info *) all_threads.head;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now %ld\n",
                         lwpid_of (get_thread_lwp (current_inferior)));
            }
          else
            {
              current_inferior = NULL;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now <NULL>\n");
            }

          /* If we were waiting for this particular child to do something...
             well, it did something.  */
          if (requested_child != NULL)
            {
              int lwpid = lwpid_of (event_child);

              /* Cancel the step-over operation --- the thread that
                 started it is gone.  */
              if (finish_step_over (event_child))
                unstop_all_lwps (1, event_child);
              delete_lwp (event_child);
              return lwpid;
            }

          delete_lwp (event_child);

          /* Wait for a more interesting event.  */
          continue;
        }

      if (event_child->must_set_ptrace_flags)
        {
          linux_enable_event_reporting (lwpid_of (event_child));
          event_child->must_set_ptrace_flags = 0;
        }

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
          && *wstat >> 16 != 0)
        {
          handle_extended_wait (event_child, *wstat);
          continue;
        }

      if (WIFSTOPPED (*wstat)
          && WSTOPSIG (*wstat) == SIGSTOP
          && event_child->stop_expected)
        {
          int should_stop;

          if (debug_threads)
            fprintf (stderr, "Expected stop.\n");
          event_child->stop_expected = 0;

          should_stop = (current_inferior->last_resume_kind == resume_stop
                         || stopping_threads);

          if (!should_stop)
            {
              linux_resume_one_lwp (event_child,
                                    event_child->stepping, 0, NULL);
              continue;
            }
        }

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  ptid_t wait_ptid;

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
         with waitpid, so instead, we wait for any child, and leave
         children we're not interested in right now with a pending
         status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  while (1)
    {
      int event_pid;

      event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);

      if (event_pid > 0
          && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
        {
          struct lwp_info *event_child
            = find_lwp_pid (pid_to_ptid (event_pid));

          if (! WIFSTOPPED (*wstat))
            mark_lwp_dead (event_child, *wstat);
          else
            {
              event_child->status_pending_p = 1;
              event_child->status_pending = *wstat;
            }
        }
      else
        return event_pid;
    }
}

/* Count the LWPs that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}

1799
1800/* Select the LWP (if any) that is currently being single-stepped. */
1801
1802static int
1803select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1804{
1805 struct lwp_info *lp = (struct lwp_info *) entry;
1806	  struct thread_info *thread = get_lwp_thread (lp);
1807
1808 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1809 && thread->last_resume_kind == resume_step
1810 && lp->status_pending_p)
1811 return 1;
1812 else
1813 return 0;
1814}
1815
1816/* Select the Nth LWP that has had a SIGTRAP event that should be
1817 reported to GDB. */
1818
1819static int
1820select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1821{
1822 struct lwp_info *lp = (struct lwp_info *) entry;
1823	  struct thread_info *thread = get_lwp_thread (lp);
1824 int *selector = data;
1825
1826 gdb_assert (selector != NULL);
1827
1828 /* Select only resumed LWPs that have a SIGTRAP event pending. */
1829 if (thread->last_resume_kind != resume_stop
1830 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1831 && lp->status_pending_p
1832 && WIFSTOPPED (lp->status_pending)
1833 && WSTOPSIG (lp->status_pending) == SIGTRAP
1834 && !breakpoint_inserted_here (lp->stop_pc))
1835 if ((*selector)-- == 0)
1836 return 1;
1837
1838 return 0;
1839}
1840
1841static int
1842cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1843{
1844 struct lwp_info *lp = (struct lwp_info *) entry;
1845	  struct thread_info *thread = get_lwp_thread (lp);
1846 struct lwp_info *event_lp = data;
1847
1848 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1849 if (lp == event_lp)
1850 return 0;
1851
1852 /* If a LWP other than the LWP that we're reporting an event for has
1853 hit a GDB breakpoint (as opposed to some random trap signal),
1854 then just arrange for it to hit it again later. We don't keep
1855 the SIGTRAP status and don't forward the SIGTRAP signal to the
1856     LWP.  We will handle the current event; eventually we will resume
1857     all LWPs, and this one will get its breakpoint trap again.
1858
1859 If we do not do this, then we run the risk that the user will
1860 delete or disable the breakpoint, but the LWP will have already
1861 tripped on it. */
1862
1863 if (thread->last_resume_kind != resume_stop
1864 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1865 && lp->status_pending_p
1866 && WIFSTOPPED (lp->status_pending)
1867 && WSTOPSIG (lp->status_pending) == SIGTRAP
1868 && !lp->stepping
1869 && !lp->stopped_by_watchpoint
1870 && cancel_breakpoint (lp))
1871 /* Throw away the SIGTRAP. */
1872 lp->status_pending_p = 0;
1873
1874 return 0;
1875}
1876
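/* Cancel pending breakpoint hits in every LWP; see the comment in
   cancel_breakpoints_callback above for why the traps are thrown away
   rather than reported.  */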
1877static void
1878linux_cancel_breakpoints (void)
1879{
1880 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
1881}
1882
1883/* Select one LWP out of those that have events pending. */
1884
1885static void
1886select_event_lwp (struct lwp_info **orig_lp)
1887{
1888 int num_events = 0;
1889 int random_selector;
1890 struct lwp_info *event_lp;
1891
1892 /* Give preference to any LWP that is being single-stepped. */
1893 event_lp
1894 = (struct lwp_info *) find_inferior (&all_lwps,
1895 select_singlestep_lwp_callback, NULL);
1896 if (event_lp != NULL)
1897 {
1898 if (debug_threads)
1899 fprintf (stderr,
1900 "SEL: Select single-step %s\n",
1901 target_pid_to_str (ptid_of (event_lp)));
1902 }
1903 else
1904 {
1905 /* No single-stepping LWP. Select one at random, out of those
1906 which have had SIGTRAP events. */
1907
1908 /* First see how many SIGTRAP events we have. */
1909 find_inferior (&all_lwps, count_events_callback, &num_events);
1910
1911 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1912 random_selector = (int)
1913 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
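	  /* The expression above maps rand () uniformly onto the range
	     [0, num_events): e.g. with num_events == 3, each of 0, 1
	     and 2 is (nearly) equally likely.  */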
1914
1915 if (debug_threads && num_events > 1)
1916 fprintf (stderr,
1917 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1918 num_events, random_selector);
1919
1920 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1921 select_event_lwp_callback,
1922 &random_selector);
1923 }
1924
1925 if (event_lp != NULL)
1926 {
1927 /* Switch the event LWP. */
1928 *orig_lp = event_lp;
1929 }
1930}
1931
1932/* Decrement the suspend count of an LWP. */
1933
1934static int
1935unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
1936{
1937 struct lwp_info *lwp = (struct lwp_info *) entry;
1938
1939 /* Ignore EXCEPT. */
1940 if (lwp == except)
1941 return 0;
1942
1943 lwp->suspended--;
1944
1945 gdb_assert (lwp->suspended >= 0);
1946 return 0;
1947}
1948
1949/* Decrement the suspend count of all LWPs, except EXCEPT, if non
1950 NULL. */
1951
1952static void
1953unsuspend_all_lwps (struct lwp_info *except)
1954{
1955 find_inferior (&all_lwps, unsuspend_one_lwp, except);
1956}
1957
1958static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
1959static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
1960 void *data);
1961static int lwp_running (struct inferior_list_entry *entry, void *data);
1962static ptid_t linux_wait_1 (ptid_t ptid,
1963 struct target_waitstatus *ourstatus,
1964 int target_options);
1965
1966/* Stabilize threads (move out of jump pads).
1967
1968 If a thread is midway collecting a fast tracepoint, we need to
1969 finish the collection and move it out of the jump pad before
1970 reporting the signal.
1971
1972 This avoids recursion while collecting (when a signal arrives
1973 midway, and the signal handler itself collects), which would trash
1974 the trace buffer. In case the user set a breakpoint in a signal
1975   handler, this avoids the backtrace showing the jump pad, etc.
1976 Most importantly, there are certain things we can't do safely if
1977   threads are stopped in a jump pad (or in its callees).  For
1978 example:
1979
1980   - starting a new trace run.  A thread still collecting the
1981   previous run could trash the trace buffer when resumed.  The trace
1982   buffer control structures would have been reset but the thread had
1983   no way to tell.  The thread could even be midway through memcpy'ing
1984   to the buffer, which would mean that when resumed, it would clobber
1985   the trace buffer that had been set for a new run.
1986
1987 - we can't rewrite/reuse the jump pads for new tracepoints
1988 safely. Say you do tstart while a thread is stopped midway while
1989 collecting. When the thread is later resumed, it finishes the
1990 collection, and returns to the jump pad, to execute the original
1991 instruction that was under the tracepoint jump at the time the
1992 older run had been started. If the jump pad had been rewritten
1993 since for something else in the new run, the thread would now
1994 execute the wrong / random instructions. */
1995
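/* Strategy: move_out_of_jump_pad_callback below nudges each stopped
   LWP that is mid-collect out of the pad, then we pull wait events
   until no LWP is still running, deferring any interesting signals so
   they can be reported once the thread is somewhere sane.  */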
1996static void
1997linux_stabilize_threads (void)
1998{
1999 struct thread_info *save_inferior;
2000 struct lwp_info *lwp_stuck;
2001
2002 lwp_stuck
2003 = (struct lwp_info *) find_inferior (&all_lwps,
2004 stuck_in_jump_pad_callback, NULL);
2005 if (lwp_stuck != NULL)
2006 {
2007 if (debug_threads)
2008 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2009 lwpid_of (lwp_stuck));
2010 return;
2011 }
2012
2013 save_inferior = current_inferior;
2014
2015 stabilizing_threads = 1;
2016
2017 /* Kick 'em all. */
2018 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2019
2020 /* Loop until all are stopped out of the jump pads. */
2021 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2022 {
2023 struct target_waitstatus ourstatus;
2024 struct lwp_info *lwp;
2025 int wstat;
2026
2027      /* Note that we go through the full wait event loop.  While
2028	 moving threads out of the jump pad, we need to be able to step
2029 over internal breakpoints and such. */
2030      linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2031
2032 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2033 {
2034 lwp = get_thread_lwp (current_inferior);
2035
2036 /* Lock it. */
2037 lwp->suspended++;
2038
2039 if (ourstatus.value.sig != TARGET_SIGNAL_0
2040 || current_inferior->last_resume_kind == resume_stop)
2041 {
2042 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
2043 enqueue_one_deferred_signal (lwp, &wstat);
2044 }
2045 }
2046 }
2047
2048 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2049
2050 stabilizing_threads = 0;
2051
2052 current_inferior = save_inferior;
2053
2054  if (debug_threads)
2055    {
2056 lwp_stuck
2057 = (struct lwp_info *) find_inferior (&all_lwps,
2058 stuck_in_jump_pad_callback, NULL);
2059 if (lwp_stuck != NULL)
2060 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2061 lwpid_of (lwp_stuck));
2062 }
2063}
2064
2065/* Wait for process, returns status.  */
2066
2067static ptid_t
2068linux_wait_1 (ptid_t ptid,
2069 struct target_waitstatus *ourstatus, int target_options)
2070{
2071  int w;
2072  struct lwp_info *event_child;
2073  int options;
2074  int pid;
2075 int step_over_finished;
2076 int bp_explains_trap;
2077 int maybe_internal_trap;
2078 int report_to_gdb;
2079  int trace_event;
2080
2081 /* Translate generic target options into linux options. */
2082 options = __WALL;
2083 if (target_options & TARGET_WNOHANG)
2084 options |= WNOHANG;
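  /* __WALL asks the kernel for events from both clone and non-clone
     children; WNOHANG turns the blocking wait into a poll.  */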
2085
2086retry:
2087 bp_explains_trap = 0;
2088 trace_event = 0;
2089 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2090
2091 /* If we were only supposed to resume one thread, only wait for
2092 that thread - if it's still alive. If it died, however - which
2093 can happen if we're coming from the thread death case below -
2094 then we need to make sure we restart the other threads. We could
2095 pick a thread at random or restart all; restarting all is less
2096 arbitrary. */
2097 if (!non_stop
2098 && !ptid_equal (cont_thread, null_ptid)
2099 && !ptid_equal (cont_thread, minus_one_ptid))
2100    {
2101 struct thread_info *thread;
2102
2103 thread = (struct thread_info *) find_inferior_id (&all_threads,
2104 cont_thread);
2105
2106 /* No stepping, no signal - unless one is pending already, of course. */
2107      if (thread == NULL)
2108 {
2109 struct thread_resume resume_info;
2110	  resume_info.thread = minus_one_ptid;
2111 resume_info.kind = resume_continue;
2112 resume_info.sig = 0;
2113	  linux_resume (&resume_info, 1);
2114	}
2115      else
2116	ptid = cont_thread;
2117    }
2118
2119 if (ptid_equal (step_over_bkpt, null_ptid))
2120 pid = linux_wait_for_event (ptid, &w, options);
2121 else
2122 {
2123 if (debug_threads)
2124 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2125 target_pid_to_str (step_over_bkpt));
2126 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2127 }
2128
2129  if (pid == 0) /* only if TARGET_WNOHANG */
2130    return null_ptid;
2131
2132  event_child = get_thread_lwp (current_inferior);
2133
2134 /* If we are waiting for a particular child, and it exited,
2135 linux_wait_for_event will return its exit status. Similarly if
2136 the last child exited. If this is not the last child, however,
2137 do not report it as exited until there is a 'thread exited' response
2138 available in the remote protocol. Instead, just wait for another event.
2139 This should be safe, because if the thread crashed we will already
2140 have reported the termination signal to GDB; that should stop any
2141 in-progress stepping operations, etc.
2142
2143 Report the exit status of the last thread to exit. This matches
2144 LinuxThreads' behavior. */
2145
2146  if (last_thread_of_process_p (current_inferior))
2147    {
2148      if (WIFEXITED (w) || WIFSIGNALED (w))
2149	{
2150 if (WIFEXITED (w))
2151 {
2152 ourstatus->kind = TARGET_WAITKIND_EXITED;
2153 ourstatus->value.integer = WEXITSTATUS (w);
2154
2155 if (debug_threads)
2156 fprintf (stderr,
2157 "\nChild exited with retcode = %x \n",
2158 WEXITSTATUS (w));
2159 }
2160 else
2161 {
2162 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2163 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2164
2165 if (debug_threads)
2166 fprintf (stderr,
2167 "\nChild terminated with signal = %x \n",
2168 WTERMSIG (w));
2169
2170 }
2171
2172	  return ptid_of (event_child);
2173	}
2174    }
2175  else
2176    {
2177 if (!WIFSTOPPED (w))
2178 goto retry;
2179 }
2180
2181 /* If this event was not handled before, and is not a SIGTRAP, we
2182 report it. SIGILL and SIGSEGV are also treated as traps in case
2183 a breakpoint is inserted at the current PC. If this target does
2184 not support internal breakpoints at all, we also report the
2185 SIGTRAP without further processing; it's of no concern to us. */
2186 maybe_internal_trap
2187 = (supports_breakpoints ()
2188 && (WSTOPSIG (w) == SIGTRAP
2189 || ((WSTOPSIG (w) == SIGILL
2190 || WSTOPSIG (w) == SIGSEGV)
2191 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2192
2193 if (maybe_internal_trap)
2194 {
2195 /* Handle anything that requires bookkeeping before deciding to
2196 report the event or continue waiting. */
2197
2198 /* First check if we can explain the SIGTRAP with an internal
2199 breakpoint, or if we should possibly report the event to GDB.
2200 Do this before anything that may remove or insert a
2201 breakpoint. */
2202 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2203
2204 /* We have a SIGTRAP, possibly a step-over dance has just
2205 finished. If so, tweak the state machine accordingly,
2206 reinsert breakpoints and delete any reinsert (software
2207 single-step) breakpoints. */
2208 step_over_finished = finish_step_over (event_child);
2209
2210 /* Now invoke the callbacks of any internal breakpoints there. */
2211 check_breakpoints (event_child->stop_pc);
2212
2213 /* Handle tracepoint data collecting. This may overflow the
2214 trace buffer, and cause a tracing stop, removing
2215 breakpoints. */
2216 trace_event = handle_tracepoints (event_child);
2217
2218 if (bp_explains_trap)
2219 {
2220 /* If we stepped or ran into an internal breakpoint, we've
2221 already handled it. So next time we resume (from this
2222 PC), we should step over it. */
2223 if (debug_threads)
2224 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2225
2226 if (breakpoint_here (event_child->stop_pc))
2227 event_child->need_step_over = 1;
2228 }
2229 }
2230 else
2231 {
2232 /* We have some other signal, possibly a step-over dance was in
2233 progress, and it should be cancelled too. */
2234 step_over_finished = finish_step_over (event_child);
2235 }
2236
2237 /* We have all the data we need. Either report the event to GDB, or
2238 resume threads and keep waiting for more. */
2239
2240 /* If we're collecting a fast tracepoint, finish the collection and
2241 move out of the jump pad before delivering a signal. See
2242 linux_stabilize_threads. */
2243
2244 if (WIFSTOPPED (w)
2245 && WSTOPSIG (w) != SIGTRAP
2246 && supports_fast_tracepoints ()
2247 && in_process_agent_loaded ())
2248 {
2249 if (debug_threads)
2250 fprintf (stderr,
2251 "Got signal %d for LWP %ld. Check if we need "
2252 "to defer or adjust it.\n",
2253 WSTOPSIG (w), lwpid_of (event_child));
2254
2255 /* Allow debugging the jump pad itself. */
2256 if (current_inferior->last_resume_kind != resume_step
2257 && maybe_move_out_of_jump_pad (event_child, &w))
2258 {
2259 enqueue_one_deferred_signal (event_child, &w);
2260
2261 if (debug_threads)
2262 fprintf (stderr,
2263 "Signal %d for LWP %ld deferred (in jump pad)\n",
2264 WSTOPSIG (w), lwpid_of (event_child));
2265
2266 linux_resume_one_lwp (event_child, 0, 0, NULL);
2267 goto retry;
2268 }
2269 }
2270
2271 if (event_child->collecting_fast_tracepoint)
2272 {
2273 if (debug_threads)
2274 fprintf (stderr, "\
2275LWP %ld was trying to move out of the jump pad (%d). \
2276Check if we're already there.\n",
2277 lwpid_of (event_child),
2278 event_child->collecting_fast_tracepoint);
2279
2280 trace_event = 1;
2281
2282 event_child->collecting_fast_tracepoint
2283 = linux_fast_tracepoint_collecting (event_child, NULL);
2284
2285 if (event_child->collecting_fast_tracepoint != 1)
2286 {
2287 /* No longer need this breakpoint. */
2288 if (event_child->exit_jump_pad_bkpt != NULL)
2289 {
2290 if (debug_threads)
2291 fprintf (stderr,
2292		     "No longer need exit-jump-pad bkpt; removing it; "
2293 "stopping all threads momentarily.\n");
2294
2295 /* Other running threads could hit this breakpoint.
2296 We don't handle moribund locations like GDB does,
2297 instead we always pause all threads when removing
2298 breakpoints, so that any step-over or
2299 decr_pc_after_break adjustment is always taken
2300 care of while the breakpoint is still
2301 inserted. */
2302 stop_all_lwps (1, event_child);
2303 cancel_breakpoints ();
2304
2305 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2306 event_child->exit_jump_pad_bkpt = NULL;
2307
2308 unstop_all_lwps (1, event_child);
2309
2310 gdb_assert (event_child->suspended >= 0);
2311 }
2312 }
2313
2314 if (event_child->collecting_fast_tracepoint == 0)
2315 {
2316 if (debug_threads)
2317 fprintf (stderr,
2318 "fast tracepoint finished "
2319 "collecting successfully.\n");
2320
2321 /* We may have a deferred signal to report. */
2322 if (dequeue_one_deferred_signal (event_child, &w))
2323 {
2324 if (debug_threads)
2325 fprintf (stderr, "dequeued one signal.\n");
2326 }
2327	  else
2328	    {
2329 if (debug_threads)
2330 fprintf (stderr, "no deferred signals.\n");
2331
2332 if (stabilizing_threads)
2333 {
2334 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2335 ourstatus->value.sig = TARGET_SIGNAL_0;
2336 return ptid_of (event_child);
2337 }
2338 }
2339 }
2340 }
2341
2342 /* Check whether GDB would be interested in this event. */
2343
2344 /* If GDB is not interested in this signal, don't stop other
2345 threads, and don't report it to GDB. Just resume the inferior
2346 right away. We do this for threading-related signals as well as
2347 any that GDB specifically requested we ignore. But never ignore
2348 SIGSTOP if we sent it ourselves, and do not ignore signals when
2349 stepping - they may require special handling to skip the signal
2350 handler. */
2351 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2352 thread library? */
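  /* With glibc's NPTL, __SIGRTMIN and __SIGRTMIN + 1 are the realtime
     signals the thread library reserves for internal use (thread
     cancellation and setxid synchronization), so when thread_db is
     active they are noise as far as GDB is concerned.  */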
2353 if (WIFSTOPPED (w)
2354 && current_inferior->last_resume_kind != resume_step
2355 && (
2356#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2357 (current_process ()->private->thread_db != NULL
2358 && (WSTOPSIG (w) == __SIGRTMIN
2359 || WSTOPSIG (w) == __SIGRTMIN + 1))
2360 ||
2361#endif
2362 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2363 && !(WSTOPSIG (w) == SIGSTOP
2364 && current_inferior->last_resume_kind == resume_stop))))
2365 {
2366 siginfo_t info, *info_p;
2367
2368 if (debug_threads)
2369 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2370 WSTOPSIG (w), lwpid_of (event_child));
2371
2372 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2373 info_p = &info;
2374 else
2375 info_p = NULL;
2376 linux_resume_one_lwp (event_child, event_child->stepping,
2377 WSTOPSIG (w), info_p);
2378 goto retry;
2379 }
2380
2381 /* If GDB wanted this thread to single step, we always want to
2382 report the SIGTRAP, and let GDB handle it. Watchpoints should
2383 always be reported. So should signals we can't explain. A
2384     SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2385     may not support Z0 breakpoints.  If we do, we'll be able to handle
2386 GDB breakpoints on top of internal breakpoints, by handling the
2387 internal breakpoint and still reporting the event to GDB. If we
2388 don't, we're out of luck, GDB won't see the breakpoint hit. */
2389  report_to_gdb = (!maybe_internal_trap
2390		   || current_inferior->last_resume_kind == resume_step
2391		   || event_child->stopped_by_watchpoint
2392 || (!step_over_finished
2393 && !bp_explains_trap && !trace_event)
2394		   || gdb_breakpoint_here (event_child->stop_pc));
2395
2396 /* We found no reason GDB would want us to stop. We either hit one
2397 of our own breakpoints, or finished an internal step GDB
2398 shouldn't know about. */
2399 if (!report_to_gdb)
2400 {
2401 if (debug_threads)
2402 {
2403 if (bp_explains_trap)
2404 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2405 if (step_over_finished)
2406 fprintf (stderr, "Step-over finished.\n");
2407 if (trace_event)
2408 fprintf (stderr, "Tracepoint event.\n");
2409 }
2410
2411 /* We're not reporting this breakpoint to GDB, so apply the
2412 decr_pc_after_break adjustment to the inferior's regcache
2413 ourselves. */
2414
2415 if (the_low_target.set_pc != NULL)
2416 {
2417 struct regcache *regcache
2418 = get_thread_regcache (get_lwp_thread (event_child), 1);
2419 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2420 }
2421
2422 /* We may have finished stepping over a breakpoint. If so,
2423 we've stopped and suspended all LWPs momentarily except the
2424 stepping one. This is where we resume them all again. We're
2425 going to keep waiting, so use proceed, which handles stepping
2426 over the next breakpoint. */
2427 if (debug_threads)
2428 fprintf (stderr, "proceeding all threads.\n");
2429
2430 if (step_over_finished)
2431 unsuspend_all_lwps (event_child);
2432
2433 proceed_all_lwps ();
2434 goto retry;
2435 }
2436
2437 if (debug_threads)
2438 {
2439      if (current_inferior->last_resume_kind == resume_step)
2440 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2441 if (event_child->stopped_by_watchpoint)
2442 fprintf (stderr, "Stopped by watchpoint.\n");
2443 if (gdb_breakpoint_here (event_child->stop_pc))
2444 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2445 if (debug_threads)
2446 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2447 }
2448
2449 /* Alright, we're going to report a stop. */
2450
2451  if (!non_stop && !stabilizing_threads)
2452 {
2453 /* In all-stop, stop all threads. */
2454      stop_all_lwps (0, NULL);
2455
2456 /* If we're not waiting for a specific LWP, choose an event LWP
2457 from among those that have had events. Giving equal priority
2458 to all LWPs that have had events helps prevent
2459 starvation. */
2460 if (ptid_equal (ptid, minus_one_ptid))
2461 {
2462 event_child->status_pending_p = 1;
2463 event_child->status_pending = w;
2464
2465 select_event_lwp (&event_child);
2466
2467 event_child->status_pending_p = 0;
2468 w = event_child->status_pending;
2469 }
2470
2471 /* Now that we've selected our final event LWP, cancel any
2472 breakpoints in other LWPs that have hit a GDB breakpoint.
2473 See the comment in cancel_breakpoints_callback to find out
2474 why. */
2475 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2476
2477 /* Stabilize threads (move out of jump pads). */
2478 stabilize_threads ();
2479 }
2480 else
2481 {
2482 /* If we just finished a step-over, then all threads had been
2483 momentarily paused. In all-stop, that's fine, we want
2484 threads stopped by now anyway. In non-stop, we need to
2485 re-resume threads that GDB wanted to be running. */
2486 if (step_over_finished)
2487	unstop_all_lwps (1, event_child);
2488 }
2489
2490  ourstatus->kind = TARGET_WAITKIND_STOPPED;
2491
2492 if (current_inferior->last_resume_kind == resume_stop
2493 && WSTOPSIG (w) == SIGSTOP)
2494 {
2495 /* A thread that has been requested to stop by GDB with vCont;t,
2496 and it stopped cleanly, so report as SIG0. The use of
2497 SIGSTOP is an implementation detail. */
2498 ourstatus->value.sig = TARGET_SIGNAL_0;
2499 }
2500 else if (current_inferior->last_resume_kind == resume_stop
2501 && WSTOPSIG (w) != SIGSTOP)
2502 {
2503 /* A thread that has been requested to stop by GDB with vCont;t,
2504	 but it stopped for other reasons.  */
2505 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2506 }
2507 else
2508 {
2509 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2510 }
2511
2512 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2513
2514  if (debug_threads)
2515    fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2516	     target_pid_to_str (ptid_of (event_child)),
2517 ourstatus->kind,
2518 ourstatus->value.sig);
2519
2520  return ptid_of (event_child);
2521}
2522
2523/* Get rid of any pending event in the pipe. */
2524static void
2525async_file_flush (void)
2526{
2527 int ret;
2528 char buf;
2529
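  /* The event pipe is expected to be non-blocking (presumably set up
     that way when it was created), so this loop drains any queued
     bytes and stops once read fails with something other than EINTR,
     i.e. EAGAIN.  */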
2530 do
2531 ret = read (linux_event_pipe[0], &buf, 1);
2532 while (ret >= 0 || (ret == -1 && errno == EINTR));
2533}
2534
2535/* Put something in the pipe, so the event loop wakes up. */
2536static void
2537async_file_mark (void)
2538{
2539 int ret;
2540
2541 async_file_flush ();
2542
2543 do
2544 ret = write (linux_event_pipe[1], "+", 1);
2545 while (ret == 0 || (ret == -1 && errno == EINTR));
2546
2547 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2548 be awakened anyway. */
2549}
2550
2551static ptid_t
2552linux_wait (ptid_t ptid,
2553 struct target_waitstatus *ourstatus, int target_options)
2554{
2555  ptid_t event_ptid;
2556
2557 if (debug_threads)
2558    fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2559
2560 /* Flush the async file first. */
2561 if (target_is_async_p ())
2562 async_file_flush ();
2563
2564  event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2565
2566 /* If at least one stop was reported, there may be more. A single
2567 SIGCHLD can signal more than one child stop. */
2568 if (target_is_async_p ()
2569 && (target_options & TARGET_WNOHANG) != 0
2570      && !ptid_equal (event_ptid, null_ptid))
2571 async_file_mark ();
2572
2573 return event_ptid;
2574}
2575
2576/* Send a signal to an LWP.  */
2577
2578static int
2579kill_lwp (unsigned long lwpid, int signo)
2580{
2581 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2582 fails, then we are not using nptl threads and we should be using kill. */
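  /* tkill delivers the signal to exactly the thread named by LWPID,
     whereas kill targets a process; under LinuxThreads each thread has
     its own PID, so plain kill reaches the right thread there.  The
     ENOSYS result is cached below so the syscall is only probed once.  */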
2583
2584#ifdef __NR_tkill
2585 {
2586 static int tkill_failed;
2587
2588 if (!tkill_failed)
2589 {
2590 int ret;
2591
2592 errno = 0;
2593 ret = syscall (__NR_tkill, lwpid, signo);
2594 if (errno != ENOSYS)
2595 return ret;
2596 tkill_failed = 1;
2597 }
2598 }
2599#endif
2600
2601 return kill (lwpid, signo);
2602}
2603
2604void
2605linux_stop_lwp (struct lwp_info *lwp)
2606{
2607 send_sigstop (lwp);
2608}
2609
2610static void
2611send_sigstop (struct lwp_info *lwp)
2612{
2613  int pid;
2614
2615 pid = lwpid_of (lwp);
2616
2617 /* If we already have a pending stop signal for this process, don't
2618 send another. */
2619  if (lwp->stop_expected)
2620    {
2621      if (debug_threads)
2622	fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2623
2624 return;
2625 }
2626
2627 if (debug_threads)
2628    fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2629
2630  lwp->stop_expected = 1;
2631  kill_lwp (pid, SIGSTOP);
2632}
2633
2634static int
2635send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2636{
2637 struct lwp_info *lwp = (struct lwp_info *) entry;
2638
2639 /* Ignore EXCEPT. */
2640 if (lwp == except)
2641 return 0;
2642
2643  if (lwp->stopped)
2644    return 0;
2645
2646 send_sigstop (lwp);
2647 return 0;
2648}
2649
2650/* Increment the suspend count of an LWP, and stop it, if not stopped
2651 yet. */
2652static int
2653suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2654 void *except)
2655{
2656 struct lwp_info *lwp = (struct lwp_info *) entry;
2657
2658 /* Ignore EXCEPT. */
2659 if (lwp == except)
2660 return 0;
2661
2662 lwp->suspended++;
2663
2664 return send_sigstop_callback (entry, except);
2665}
2666
2667static void
2668mark_lwp_dead (struct lwp_info *lwp, int wstat)
2669{
2670 /* It's dead, really. */
2671 lwp->dead = 1;
2672
2673 /* Store the exit status for later. */
2674 lwp->status_pending_p = 1;
2675 lwp->status_pending = wstat;
2676
2677 /* Prevent trying to stop it. */
2678 lwp->stopped = 1;
2679
2680 /* No further stops are expected from a dead lwp. */
2681 lwp->stop_expected = 0;
2682}
2683
2684static void
2685wait_for_sigstop (struct inferior_list_entry *entry)
2686{
2687  struct lwp_info *lwp = (struct lwp_info *) entry;
2688  struct thread_info *saved_inferior;
2689  int wstat;
2690 ptid_t saved_tid;
2691 ptid_t ptid;
2692  int pid;
2693
2694  if (lwp->stopped)
2695 {
2696 if (debug_threads)
2697 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2698 lwpid_of (lwp));
2699 return;
2700 }
2701
2702 saved_inferior = current_inferior;
2703 if (saved_inferior != NULL)
2704 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2705 else
2706    saved_tid = null_ptid; /* avoid bogus unused warning */
2707
2708  ptid = lwp->head.id;
2709
2710 if (debug_threads)
2711 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2712
2713 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2714
2715 /* If we stopped with a non-SIGSTOP signal, save it for later
2716 and record the pending SIGSTOP. If the process exited, just
2717 return. */
2718  if (WIFSTOPPED (wstat))
2719 {
2720 if (debug_threads)
2721 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2722 lwpid_of (lwp), WSTOPSIG (wstat));
2723
2724      if (WSTOPSIG (wstat) != SIGSTOP)
2725 {
2726 if (debug_threads)
2727 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2728 lwpid_of (lwp), wstat);
2729
2730 lwp->status_pending_p = 1;
2731 lwp->status_pending = wstat;
2732 }
0d62e5e8 2733 }
2734  else
2735 {
2736 if (debug_threads)
2737	fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2738
2739 lwp = find_lwp_pid (pid_to_ptid (pid));
2740 if (lwp)
2741 {
2742 /* Leave this status pending for the next time we're able to
2743	     report it.  In the meantime, we'll report this lwp as
2744 dead to GDB, so GDB doesn't try to read registers and
2745 memory from it. This can only happen if this was the
2746 last thread of the process; otherwise, PID is removed
2747 from the thread tables before linux_wait_for_event
2748 returns. */
2749 mark_lwp_dead (lwp, wstat);
2750 }
2751	}
2752
2753  if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2754 current_inferior = saved_inferior;
2755 else
2756 {
2757 if (debug_threads)
2758 fprintf (stderr, "Previously current thread died.\n");
2759
2760 if (non_stop)
2761 {
2762 /* We can't change the current inferior behind GDB's back,
2763 otherwise, a subsequent command may apply to the wrong
2764 process. */
2765 current_inferior = NULL;
2766 }
2767 else
2768 {
2769 /* Set a valid thread as current. */
2770 set_desired_inferior (0);
2771 }
2772 }
2773}
2774
2775/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2776 move it out, because we need to report the stop event to GDB. For
2777 example, if the user puts a breakpoint in the jump pad, it's
2778 because she wants to debug it. */
2779
2780static int
2781stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2782{
2783 struct lwp_info *lwp = (struct lwp_info *) entry;
2784 struct thread_info *thread = get_lwp_thread (lwp);
2785
2786 gdb_assert (lwp->suspended == 0);
2787 gdb_assert (lwp->stopped);
2788
2789  /* Allow debugging the jump pad, gdb_collect, etc.  */
2790 return (supports_fast_tracepoints ()
2791 && in_process_agent_loaded ()
2792 && (gdb_breakpoint_here (lwp->stop_pc)
2793 || lwp->stopped_by_watchpoint
2794 || thread->last_resume_kind == resume_step)
2795 && linux_fast_tracepoint_collecting (lwp, NULL));
2796}
2797
2798static void
2799move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2800{
2801 struct lwp_info *lwp = (struct lwp_info *) entry;
2802 struct thread_info *thread = get_lwp_thread (lwp);
2803 int *wstat;
2804
2805 gdb_assert (lwp->suspended == 0);
2806 gdb_assert (lwp->stopped);
2807
2808 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2809
2810 /* Allow debugging the jump pad, gdb_collect, etc. */
2811 if (!gdb_breakpoint_here (lwp->stop_pc)
2812 && !lwp->stopped_by_watchpoint
2813 && thread->last_resume_kind != resume_step
2814 && maybe_move_out_of_jump_pad (lwp, wstat))
2815 {
2816 if (debug_threads)
2817 fprintf (stderr,
2818 "LWP %ld needs stabilizing (in jump pad)\n",
2819 lwpid_of (lwp));
2820
2821 if (wstat)
2822 {
2823 lwp->status_pending_p = 0;
2824 enqueue_one_deferred_signal (lwp, wstat);
2825
2826 if (debug_threads)
2827 fprintf (stderr,
2828 "Signal %d for LWP %ld deferred "
2829 "(in jump pad)\n",
2830 WSTOPSIG (*wstat), lwpid_of (lwp));
2831 }
2832
2833 linux_resume_one_lwp (lwp, 0, 0, NULL);
2834 }
2835 else
2836 lwp->suspended++;
2837}
2838
2839static int
2840lwp_running (struct inferior_list_entry *entry, void *data)
2841{
2842 struct lwp_info *lwp = (struct lwp_info *) entry;
2843
2844 if (lwp->dead)
2845 return 0;
2846 if (lwp->stopped)
2847 return 0;
2848 return 1;
2849}
2850
2851/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2852 If SUSPEND, then also increase the suspend count of every LWP,
2853 except EXCEPT. */
2854
2855static void
2856stop_all_lwps (int suspend, struct lwp_info *except)
2857{
2858 stopping_threads = 1;
2859
2860 if (suspend)
2861 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2862 else
2863 find_inferior (&all_lwps, send_sigstop_callback, except);
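  /* The callbacks above only queue a SIGSTOP for each LWP that is
     still running; now wait until every one of them has actually
     reported a stop.  */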
2864  for_each_inferior (&all_lwps, wait_for_sigstop);
2865 stopping_threads = 0;
2866}
2867
2868/* Resume execution of the inferior process.
2869 If STEP is nonzero, single-step it.
2870 If SIGNAL is nonzero, give it that signal. */
2871
2872static void
2873linux_resume_one_lwp (struct lwp_info *lwp,
2874		      int step, int signal, siginfo_t *info)
2875{
2876  struct thread_info *saved_inferior;
2877  int fast_tp_collecting;
2878
2879  if (lwp->stopped == 0)
2880 return;
2881
2882 fast_tp_collecting = lwp->collecting_fast_tracepoint;
2883
2884 gdb_assert (!stabilizing_threads || fast_tp_collecting);
2885
2886 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2887 user used the "jump" command, or "set $pc = foo"). */
2888 if (lwp->stop_pc != get_pc (lwp))
2889 {
2890 /* Collecting 'while-stepping' actions doesn't make sense
2891 anymore. */
2892 release_while_stepping_state_list (get_lwp_thread (lwp));
2893 }
2894
2895 /* If we have pending signals or status, and a new signal, enqueue the
2896 signal. Also enqueue the signal if we are waiting to reinsert a
2897 breakpoint; it will be picked up again below. */
2898 if (signal != 0
2899 && (lwp->status_pending_p
2900 || lwp->pending_signals != NULL
2901 || lwp->bp_reinsert != 0
2902 || fast_tp_collecting))
2903 {
2904 struct pending_signals *p_sig;
2905      p_sig = xmalloc (sizeof (*p_sig));
2906      p_sig->prev = lwp->pending_signals;
2907      p_sig->signal = signal;
2908 if (info == NULL)
2909 memset (&p_sig->info, 0, sizeof (siginfo_t));
2910 else
2911 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2912      lwp->pending_signals = p_sig;
2913 }
2914
2915 if (lwp->status_pending_p)
2916 {
2917 if (debug_threads)
2918 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2919 " has pending status\n",
2920 lwpid_of (lwp), step ? "step" : "continue", signal,
2921 lwp->stop_expected ? "expected" : "not expected");
2922 return;
2923 }
2924
2925 saved_inferior = current_inferior;
2926  current_inferior = get_lwp_thread (lwp);
2927
2928 if (debug_threads)
2929    fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2930	     lwpid_of (lwp), step ? "step" : "continue", signal,
2931	     lwp->stop_expected ? "expected" : "not expected");
2932
2933 /* This bit needs some thinking about. If we get a signal that
2934 we must report while a single-step reinsert is still pending,
2935 we often end up resuming the thread. It might be better to
2936 (ew) allow a stack of pending events; then we could be sure that
2937 the reinsert happened right away and not lose any signals.
2938
2939 Making this stack would also shrink the window in which breakpoints are
2940     uninserted (see comment in linux_wait_for_lwp) but not enough for
2941 complete correctness, so it won't solve that problem. It may be
2942 worthwhile just to solve this one, however. */
2943  if (lwp->bp_reinsert != 0)
2944 {
2945 if (debug_threads)
2946 fprintf (stderr, " pending reinsert at 0x%s\n",
2947 paddress (lwp->bp_reinsert));
2948
2949 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
2950 {
2951 if (fast_tp_collecting == 0)
2952 {
2953 if (step == 0)
2954 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2955 if (lwp->suspended)
2956 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
2957 lwp->suspended);
2958 }
2959
2960 step = 1;
2961 }
2962
2963 /* Postpone any pending signal. It was enqueued above. */
2964 signal = 0;
2965 }
2966
2967 if (fast_tp_collecting == 1)
2968 {
2969 if (debug_threads)
2970 fprintf (stderr, "\
2971lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
2972 lwpid_of (lwp));
2973
2974 /* Postpone any pending signal. It was enqueued above. */
2975 signal = 0;
2976 }
2977 else if (fast_tp_collecting == 2)
2978 {
2979 if (debug_threads)
2980 fprintf (stderr, "\
2981lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
2982 lwpid_of (lwp));
2983
2984 if (can_hardware_single_step ())
2985 step = 1;
2986 else
2987 fatal ("moving out of jump pad single-stepping"
2988 " not implemented on this target");
2989
2990 /* Postpone any pending signal. It was enqueued above. */
2991 signal = 0;
2992 }
2993
2994 /* If we have while-stepping actions in this thread set it stepping.
2995 If we have a signal to deliver, it may or may not be set to
2996 SIG_IGN, we don't know. Assume so, and allow collecting
2997 while-stepping into a signal handler. A possible smart thing to
2998 do would be to set an internal breakpoint at the signal return
2999 address, continue, and carry on catching this while-stepping
3000 action only when that breakpoint is hit. A future
3001 enhancement. */
3002 if (get_lwp_thread (lwp)->while_stepping != NULL
3003 && can_hardware_single_step ())
3004 {
3005 if (debug_threads)
3006 fprintf (stderr,
3007 "lwp %ld has a while-stepping action -> forcing step.\n",
3008 lwpid_of (lwp));
3009 step = 1;
3010 }
3011
3012  if (debug_threads && the_low_target.get_pc != NULL)
3013    {
3014 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3015 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3016      fprintf (stderr, "  resuming from pc 0x%lx\n", (long) pc);
3017 }
3018
3019 /* If we have pending signals, consume one unless we are trying to
3020 reinsert a breakpoint or we're trying to finish a fast tracepoint
3021 collect. */
3022 if (lwp->pending_signals != NULL
3023 && lwp->bp_reinsert == 0
3024 && fast_tp_collecting == 0)
3025 {
3026 struct pending_signals **p_sig;
3027
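      /* The pending_signals list is kept newest-first, so walk to its
         tail and deliver the oldest queued signal first.  */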
3028      p_sig = &lwp->pending_signals;
3029 while ((*p_sig)->prev != NULL)
3030 p_sig = &(*p_sig)->prev;
3031
3032 signal = (*p_sig)->signal;
3033      if ((*p_sig)->info.si_signo != 0)
3034	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
3035
3036 free (*p_sig);
3037 *p_sig = NULL;
3038 }
3039
3040 if (the_low_target.prepare_to_resume != NULL)
3041 the_low_target.prepare_to_resume (lwp);
3042
3043  regcache_invalidate_one ((struct inferior_list_entry *)
3044			   get_lwp_thread (lwp));
3045  errno = 0;
3046  lwp->stopped = 0;
3047  lwp->stopped_by_watchpoint = 0;
3048  lwp->stepping = step;
3049 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3050 /* Coerce to a uintptr_t first to avoid potential gcc warning
3051 of coercing an 8 byte integer to a 4 byte pointer. */
3052 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3053
3054 current_inferior = saved_inferior;
3055  if (errno)
3056 {
3057 /* ESRCH from ptrace either means that the thread was already
3058 running (an error) or that it is gone (a race condition). If
3059 it's gone, we will get a notification the next time we wait,
3060 so we can ignore the error. We could differentiate these
3061 two, but it's tricky without waiting; the thread still exists
3062 as a zombie, so sending it signal 0 would succeed. So just
3063 ignore ESRCH. */
3064 if (errno == ESRCH)
3065 return;
3066
3067 perror_with_name ("ptrace");
3068 }
3069}
3070
3071struct thread_resume_array
3072{
3073 struct thread_resume *resume;
3074 size_t n;
3075};
3076
3077/* This function is called once per thread. We look up the thread
3078 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3079 resume request.
3080
3081 This algorithm is O(threads * resume elements), but resume elements
3082 is small (and will remain small at least until GDB supports thread
3083 suspension). */
3084static int
3085linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3086{
3087  struct lwp_info *lwp;
3088  struct thread_info *thread;
3089  int ndx;
3090  struct thread_resume_array *r;
3091
3092 thread = (struct thread_info *) entry;
3093  lwp = get_thread_lwp (thread);
3094  r = arg;
3095
3096  for (ndx = 0; ndx < r->n; ndx++)
3097 {
3098 ptid_t ptid = r->resume[ndx].thread;
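      /* A resume request applies to this thread if it names all
         threads (minus_one_ptid), names this thread exactly, or names
         this thread's whole process (a bare pid, or an lwp of -1 plus
         our pid).  */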
3099 if (ptid_equal (ptid, minus_one_ptid)
3100 || ptid_equal (ptid, entry->id)
3101 || (ptid_is_pid (ptid)
3102 && (ptid_get_pid (ptid) == pid_of (lwp)))
3103 || (ptid_get_lwp (ptid) == -1
3104 && (ptid_get_pid (ptid) == pid_of (lwp))))
3105 {
3106	  if (r->resume[ndx].kind == resume_stop
3107	      && thread->last_resume_kind == resume_stop)
3108 {
3109 if (debug_threads)
3110 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3111 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3112 ? "stopped"
3113 : "stopping",
3114 lwpid_of (lwp));
3115
3116 continue;
3117 }
3118
3119	  lwp->resume = &r->resume[ndx];
3120	  thread->last_resume_kind = lwp->resume->kind;
3121
3122 /* If we had a deferred signal to report, dequeue one now.
3123 This can happen if LWP gets more than one signal while
3124 trying to get out of a jump pad. */
3125 if (lwp->stopped
3126 && !lwp->status_pending_p
3127 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3128 {
3129 lwp->status_pending_p = 1;
3130
3131 if (debug_threads)
3132 fprintf (stderr,
3133 "Dequeueing deferred signal %d for LWP %ld, "
3134 "leaving status pending.\n",
3135 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3136 }
3137
3138 return 0;
3139 }
3140 }
3141
3142 /* No resume action for this thread. */
3143 lwp->resume = NULL;
3144
3145  return 0;
3146}
3147
3148
3149/* Set *FLAG_P if this lwp has an interesting status pending. */
3150static int
3151resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3152{
3153  struct lwp_info *lwp = (struct lwp_info *) entry;
3154
3155 /* LWPs which will not be resumed are not interesting, because
3156 we might not wait for them next time through linux_wait. */
3157  if (lwp->resume == NULL)
3158    return 0;
3159
3160  if (lwp->status_pending_p)
3161 * (int *) flag_p = 1;
3162
3163 return 0;
3164}
3165
3166/* Return 1 if this lwp that GDB wants running is stopped at an
3167 internal breakpoint that we need to step over. It assumes that any
3168 required STOP_PC adjustment has already been propagated to the
3169 inferior's regcache. */
3170
3171static int
3172need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3173{
3174 struct lwp_info *lwp = (struct lwp_info *) entry;
3175  struct thread_info *thread;
3176 struct thread_info *saved_inferior;
3177 CORE_ADDR pc;
3178
3179 /* LWPs which will not be resumed are not interesting, because we
3180 might not wait for them next time through linux_wait. */
3181
3182 if (!lwp->stopped)
3183 {
3184 if (debug_threads)
3185 fprintf (stderr,
3186 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3187 lwpid_of (lwp));
3188 return 0;
3189 }
3190
3191 thread = get_lwp_thread (lwp);
3192
3193 if (thread->last_resume_kind == resume_stop)
3194 {
3195 if (debug_threads)
3196 fprintf (stderr,
3197 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3198 lwpid_of (lwp));
3199 return 0;
3200 }
3201
3202 gdb_assert (lwp->suspended >= 0);
3203
3204 if (lwp->suspended)
3205 {
3206 if (debug_threads)
3207 fprintf (stderr,
3208 "Need step over [LWP %ld]? Ignoring, suspended\n",
3209 lwpid_of (lwp));
3210 return 0;
3211 }
3212
3213 if (!lwp->need_step_over)
3214 {
3215 if (debug_threads)
3216 fprintf (stderr,
3217 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3218 }
3219
3220  if (lwp->status_pending_p)
3221 {
3222 if (debug_threads)
3223 fprintf (stderr,
3224 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3225 lwpid_of (lwp));
3226 return 0;
3227 }
3228
3229 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3230 or we have. */
3231 pc = get_pc (lwp);
3232
3233 /* If the PC has changed since we stopped, then don't do anything,
3234 and let the breakpoint/tracepoint be hit. This happens if, for
3235 instance, GDB handled the decr_pc_after_break subtraction itself,
3236 GDB is OOL stepping this thread, or the user has issued a "jump"
3237 command, or poked thread's registers herself. */
3238 if (pc != lwp->stop_pc)
3239 {
3240 if (debug_threads)
3241 fprintf (stderr,
3242 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3243 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3244 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3245
3246 lwp->need_step_over = 0;
3247 return 0;
3248 }
3249
3250 saved_inferior = current_inferior;
3251  current_inferior = thread;
3252
3253  /* We can only step over breakpoints we know about.  */
3254  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3255    {
3256 /* Don't step over a breakpoint that GDB expects to hit
3257 though. */
3258 if (gdb_breakpoint_here (pc))
3259 {
3260 if (debug_threads)
3261 fprintf (stderr,
3262 "Need step over [LWP %ld]? yes, but found"
3263 " GDB breakpoint at 0x%s; skipping step over\n",
3264 lwpid_of (lwp), paddress (pc));
3265
3266 current_inferior = saved_inferior;
3267 return 0;
3268 }
3269 else
3270 {
3271 if (debug_threads)
3272 fprintf (stderr,
3273 "Need step over [LWP %ld]? yes, "
3274 "found breakpoint at 0x%s\n",
3275		     lwpid_of (lwp), paddress (pc));
3276
3277 /* We've found an lwp that needs stepping over --- return 1 so
3278 that find_inferior stops looking. */
3279 current_inferior = saved_inferior;
3280
3281 /* If the step over is cancelled, this is set again. */
3282 lwp->need_step_over = 0;
3283 return 1;
3284 }
3285 }
3286
3287 current_inferior = saved_inferior;
3288
3289 if (debug_threads)
3290 fprintf (stderr,
3291 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3292 lwpid_of (lwp), paddress (pc));
3293
3294  return 0;
3295}
3296
3297/* Start a step-over operation on LWP. When LWP stopped at a
3298 breakpoint, to make progress, we need to remove the breakpoint out
3299 of the way. If we let other threads run while we do that, they may
3300 pass by the breakpoint location and miss hitting it. To avoid
3301 that, a step-over momentarily stops all threads while LWP is
3302 single-stepped while the breakpoint is temporarily uninserted from
3303 the inferior. When the single-step finishes, we reinsert the
3304 breakpoint, and let all threads that are supposed to be running,
3305 run again.
3306
3307 On targets that don't support hardware single-step, we don't
3308 currently support full software single-stepping. Instead, we only
3309 support stepping over the thread event breakpoint, by asking the
3310 low target where to place a reinsert breakpoint. Since this
3311 routine assumes the breakpoint being stepped over is a thread event
3312 breakpoint, it usually assumes the return address of the current
3313 function is a good enough place to set the reinsert breakpoint. */
3314
3315static int
3316start_step_over (struct lwp_info *lwp)
3317{
3318 struct thread_info *saved_inferior;
3319 CORE_ADDR pc;
3320 int step;
3321
3322 if (debug_threads)
3323 fprintf (stderr,
3324 "Starting step-over on LWP %ld. Stopping all threads\n",
3325 lwpid_of (lwp));
3326
3327 stop_all_lwps (1, lwp);
3328 gdb_assert (lwp->suspended == 0);
3329
3330 if (debug_threads)
3331 fprintf (stderr, "Done stopping all threads for step-over.\n");
3332
3333 /* Note, we should always reach here with an already adjusted PC,
3334 either by GDB (if we're resuming due to GDB's request), or by our
3335 caller, if we just finished handling an internal breakpoint GDB
3336 shouldn't care about. */
3337 pc = get_pc (lwp);
3338
3339 saved_inferior = current_inferior;
3340 current_inferior = get_lwp_thread (lwp);
3341
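  /* Record where the breakpoint has to go back, then lift both kinds
     of insertions at this PC so that the single-step executes the
     original instruction rather than the trap.  */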
3342 lwp->bp_reinsert = pc;
3343 uninsert_breakpoints_at (pc);
3344  uninsert_fast_tracepoint_jumps_at (pc);
3345
3346 if (can_hardware_single_step ())
3347 {
3348 step = 1;
3349 }
3350 else
3351 {
3352 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3353 set_reinsert_breakpoint (raddr);
3354 step = 0;
3355 }
3356
3357 current_inferior = saved_inferior;
3358
3359 linux_resume_one_lwp (lwp, step, 0, NULL);
3360
3361 /* Require next event from this LWP. */
3362 step_over_bkpt = lwp->head.id;
3363 return 1;
3364}
3365
3366/* Finish a step-over. Reinsert the breakpoint we had uninserted in
3367 start_step_over, if still there, and delete any reinsert
3368 breakpoints we've set, on non hardware single-step targets. */
3369
3370static int
3371finish_step_over (struct lwp_info *lwp)
3372{
3373 if (lwp->bp_reinsert != 0)
3374 {
3375 if (debug_threads)
3376 fprintf (stderr, "Finished step over.\n");
3377
3378 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3379 may be no breakpoint to reinsert there by now. */
3380 reinsert_breakpoints_at (lwp->bp_reinsert);
3381      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3382
3383 lwp->bp_reinsert = 0;
3384
3385 /* Delete any software-single-step reinsert breakpoints. No
3386 longer needed. We don't have to worry about other threads
3387 hitting this trap, and later not being able to explain it,
3388 because we were stepping over a breakpoint, and we hold all
3389 threads but LWP stopped while doing that. */
3390 if (!can_hardware_single_step ())
3391 delete_reinsert_breakpoints ();
3392
3393 step_over_bkpt = null_ptid;
3394 return 1;
3395 }
3396 else
3397 return 0;
3398}
3399
3400/* This function is called once per thread. We check the thread's resume
3401 request, which will tell us whether to resume, step, or leave the thread
3402   stopped; and what signal, if any, it should be sent.
3403
3404 For threads which we aren't explicitly told otherwise, we preserve
3405 the stepping flag; this is used for stepping over gdbserver-placed
3406 breakpoints.
3407
3408 If pending_flags was set in any thread, we queue any needed
3409 signals, since we won't actually resume. We already have a pending
3410 event to report, so we don't need to preserve any step requests;
3411 they should be re-issued if necessary. */
3412
3413static int
3414linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3415{
3416  struct lwp_info *lwp;
3417  struct thread_info *thread;
3418  int step;
3419 int leave_all_stopped = * (int *) arg;
3420 int leave_pending;
3421
3422 thread = (struct thread_info *) entry;
3423  lwp = get_thread_lwp (thread);
3424
3425  if (lwp->resume == NULL)
3426    return 0;
3427
3428  if (lwp->resume->kind == resume_stop)
3429    {
3430      if (debug_threads)
3431	fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3432
3433 if (!lwp->stopped)
3434 {
3435 if (debug_threads)
3436	    fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3437
3438 /* Stop the thread, and wait for the event asynchronously,
3439 through the event loop. */
3440	  send_sigstop (lwp);
3441 }
3442 else
3443 {
3444 if (debug_threads)
3445 fprintf (stderr, "already stopped LWP %ld\n",
3446 lwpid_of (lwp));
3447
3448 /* The LWP may have been stopped in an internal event that
3449 was not meant to be notified back to GDB (e.g., gdbserver
3450 breakpoint), so we should be reporting a stop event in
3451 this case too. */
3452
3453 /* If the thread already has a pending SIGSTOP, this is a
3454 no-op. Otherwise, something later will presumably resume
3455 the thread and this will cause it to cancel any pending
3456 operation, due to last_resume_kind == resume_stop. If
3457 the thread already has a pending status to report, we
3458 will still report it the next time we wait - see
3459 status_pending_p_callback. */
3460
3461 /* If we already have a pending signal to report, then
3462 there's no need to queue a SIGSTOP, as this means we're
3463 midway through moving the LWP out of the jumppad, and we
3464 will report the pending signal as soon as that is
3465 finished. */
3466 if (lwp->pending_signals_to_report == NULL)
3467 send_sigstop (lwp);
3468	}
3469
3470 /* For stop requests, we're done. */
3471 lwp->resume = NULL;
3472      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3473      return 0;
3474 }
3475
3476 /* If this thread which is about to be resumed has a pending status,
3477 then don't resume any threads - we can just report the pending
3478 status. Make sure to queue any signals that would otherwise be
3479     sent.  In all-stop mode, we base this decision on whether *any*
3480 thread has a pending status. If there's a thread that needs the
3481 step-over-breakpoint dance, then don't resume any other thread
3482 but that particular one. */
3483 leave_pending = (lwp->status_pending_p || leave_all_stopped);
5544ad89 3484
d50171e4 3485 if (!leave_pending)
bd99dc85
PA
3486 {
3487 if (debug_threads)
3488 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
5544ad89 3489
d50171e4 3490 step = (lwp->resume->kind == resume_step);
2acc282a 3491 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
bd99dc85
PA
3492 }
3493 else
3494 {
3495 if (debug_threads)
3496 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
5544ad89 3497
bd99dc85
PA
3498 /* If we have a new signal, enqueue the signal. */
3499 if (lwp->resume->sig != 0)
3500 {
3501 struct pending_signals *p_sig;
3502 p_sig = xmalloc (sizeof (*p_sig));
3503 p_sig->prev = lwp->pending_signals;
3504 p_sig->signal = lwp->resume->sig;
3505 memset (&p_sig->info, 0, sizeof (siginfo_t));
3506
3507 /* If this is the same signal we were previously stopped by,
3508 make sure to queue its siginfo. We can ignore the return
3509 value of ptrace; if it fails, we'll skip
3510 PTRACE_SETSIGINFO. */
3511 if (WIFSTOPPED (lwp->last_status)
3512 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3513 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3514
3515 lwp->pending_signals = p_sig;
3516 }
3517 }
5544ad89 3518
fc7238bb 3519 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 3520 lwp->resume = NULL;
5544ad89 3521 return 0;
0d62e5e8
DJ
3522}
3523
static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct lwp_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_lwps, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct lwp_info *) find_inferior (&all_lwps,
					   need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	fprintf (stderr, "Not resuming all, need step over\n");
      else if (any_pending)
	fprintf (stderr,
		 "Not resuming, all-stop and found "
		 "an LWP with pending status\n");
      else
	fprintf (stderr, "Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (need_step_over);
}

/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.  */

static int
proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread;
  int step;

  if (lwp == except)
    return 0;

  if (debug_threads)
    fprintf (stderr,
	     "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));

  if (!lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr, "   LWP %ld already running\n", lwpid_of (lwp));
      return 0;
    }

  thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	fprintf (stderr, "   client wants LWP %ld to remain stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr, "   LWP %ld has pending status, leaving stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	fprintf (stderr, "   LWP %ld is suspended\n", lwpid_of (lwp));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && lwp->collecting_fast_tracepoint == 0)
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here).  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling, for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	fprintf (stderr,
		 "Client wants LWP %ld to stop. "
		 "Making sure it has a SIGSTOP pending\n",
		 lwpid_of (lwp));

      send_sigstop (lwp);
    }

  step = thread->last_resume_kind == resume_step;
  linux_resume_one_lwp (lwp, step, 0, NULL);
  return 0;
}

static int
unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;

  if (lwp == except)
    return 0;

  lwp->suspended--;
  gdb_assert (lwp->suspended >= 0);

  return proceed_one_lwp (entry, except);
}

/* When we finish a step-over, set threads running again.  If there's
   another thread that may need a step-over, now's the time to start
   it.  Eventually, we'll move all threads past their breakpoints.  */

static void
proceed_all_lwps (void)
{
  struct lwp_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (supports_breakpoints ())
    {
      need_step_over
	= (struct lwp_info *) find_inferior (&all_lwps,
					     need_step_over_p, NULL);

      if (need_step_over != NULL)
	{
	  if (debug_threads)
	    fprintf (stderr, "proceed_all_lwps: found "
		     "thread %ld needing a step-over\n",
		     lwpid_of (need_step_over));

	  start_step_over (need_step_over);
	  return;
	}
    }

  if (debug_threads)
    fprintf (stderr, "Proceeding, no step-over needed\n");

  find_inferior (&all_lwps, proceed_one_lwp, NULL);
}

/* Stopped LWPs that the client wanted to be running, that don't have
   pending statuses, are set to run again, except for EXCEPT, if not
   NULL.  This undoes a stop_all_lwps call.  */

static void
unstop_all_lwps (int unsuspend, struct lwp_info *except)
{
  if (debug_threads)
    {
      if (except)
	fprintf (stderr,
		 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
      else
	fprintf (stderr,
		 "unstopping all lwps\n");
    }

  if (unsuspend)
    find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
  else
    find_inferior (&all_lwps, proceed_one_lwp, except);
}

#ifdef HAVE_LINUX_USRREGS

int
register_addr (int regnum)
{
  int addr;

  if (regnum < 0 || regnum >= the_low_target.num_regs)
    error ("Invalid register number %d.", regnum);

  addr = the_low_target.regmap[regnum];

  return addr;
}

/* Fetch one register.  */
static void
fetch_register (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= the_low_target.num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (regno);
  if (regaddr == -1)
    return;

  size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);

  pid = lwpid_of (get_thread_lwp (current_inferior));
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid a potential gcc
		   warning about coercing an 8 byte integer to a 4 byte
		   pointer.  */
		(PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}

/* Store one register.  */
static void
store_register (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= the_low_target.num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (regno);
  if (regaddr == -1)
    return;

  size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  memset (buf, 0, size);

  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (get_thread_lwp (current_inferior));
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid a potential gcc
		 warning about coercing an 8 byte integer to a 4 byte
		 pointer.  */
	      (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
	      (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
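
/* Illustrative sketch (compiled out; not part of gdbserver): the
   PEEKUSER/POKEUSER round trip used by the two functions above,
   reduced to a single word.  A stopped tracee's USER-area word at
   offset REGADDR is read, replaced, and the old value returned via
   OLD_VALUE.  Checking errno is mandatory because -1 is a valid
   PTRACE_PEEKUSER result.  */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <errno.h>

static int
poke_user_word (pid_t pid, long regaddr, long new_value, long *old_value)
{
  errno = 0;
  *old_value = ptrace (PTRACE_PEEKUSER, pid, (void *) regaddr, 0);
  if (errno != 0)
    return -1;
  if (ptrace (PTRACE_POKEUSER, pid, (void *) regaddr,
	      (void *) new_value) == -1)
    return -1;
  return 0;
}
#endif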

/* Fetch all registers, or just one, from the child process.  */
static void
usr_fetch_inferior_registers (struct regcache *regcache, int regno)
{
  if (regno == -1)
    for (regno = 0; regno < the_low_target.num_regs; regno++)
      fetch_register (regcache, regno);
  else
    fetch_register (regcache, regno);
}

/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers.
   Otherwise, REGNO specifies which register (so we can save time).  */
static void
usr_store_inferior_registers (struct regcache *regcache, int regno)
{
  if (regno == -1)
    for (regno = 0; regno < the_low_target.num_regs; regno++)
      store_register (regcache, regno);
  else
    store_register (regcache, regno);
}
#endif /* HAVE_LINUX_USRREGS */


#ifdef HAVE_LINUX_REGSETS

static int
regsets_fetch_inferior_registers (struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  regset = target_regsets;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  while (regset->size >= 0)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || disabled_regsets[regset - target_regsets])
	{
	  regset++;
	  continue;
	}

      buf = xmalloc (regset->size);

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid, nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process.  */
	      disabled_regsets[regset - target_regsets] = 1;
	      free (buf);
	      continue;
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      regset->store_function (regcache, buf);
      regset++;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}

static int
regsets_store_inferior_registers (struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  regset = target_regsets;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  while (regset->size >= 0)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || disabled_regsets[regset - target_regsets])
	{
	  regset++;
	  continue;
	}

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid, nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid, nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process.  */
	      disabled_regsets[regset - target_regsets] = 1;
	      free (buf);
	      continue;
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      regset++;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}

#endif /* HAVE_LINUX_REGSETS */
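
/* Illustrative sketch (compiled out; not part of gdbserver): the
   nt_type path above corresponds to the PTRACE_GETREGSET interface
   available on Linux 2.6.34 and later, where the regset is selected
   by an NT_* note type and the buffer is described by a struct
   iovec whose iov_len the kernel updates to the amount transferred.
   Depending on the libc, PTRACE_GETREGSET may only be declared in
   <linux/ptrace.h>.  */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>

static long
fetch_gregs (pid_t pid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
#endif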


void
linux_fetch_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_fetch_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regcache, regno);
#endif
}

void
linux_store_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_store_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_store_inferior_registers (regcache, regno);
#endif
}

/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
      / sizeof (PTRACE_XFER_TYPE);
  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer
    = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
  int fd;
  char filename[64];
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      if (pread64 (fd, myaddr, len, memaddr) != len)
#else
      if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
#endif
	{
	  close (fd);
	  goto no_proc;
	}

      close (fd);
      return 0;
    }

 no_proc:
  /* Read all the longwords.  */
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      /* Coerce the 3rd arg to a uintptr_t first to avoid a potential gcc
	 warning about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
      if (errno)
	return errno;
    }

  /* Copy appropriate bytes out of the buffer.  */
  memcpy (myaddr,
	  (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  len);

  return 0;
}
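
/* A compiled-out sketch (not part of gdbserver) checking the
   rounding arithmetic used above: with an 8-byte PTRACE_XFER_TYPE, a
   5-byte read at 0x1003 rounds ADDR down to 0x1000, needs exactly one
   longword (0x1008 - 0x1000 rounded up over 8), and the payload
   begins at offset 0x1003 & 7 == 3 within that word.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
check_rounding (void)
{
  uint64_t memaddr = 0x1003, len = 5, xfer = 8;
  uint64_t addr = memaddr & -xfer;	/* Rounded down: 0x1000.  */
  uint64_t count = (((memaddr + len) - addr) + xfer - 1) / xfer;

  assert (addr == 0x1000);
  assert (count == 1);
  assert ((memaddr & (xfer - 1)) == 3);
}
#endif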

/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  */

static int
linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
      / sizeof (PTRACE_XFER_TYPE);

  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
    alloca (count * sizeof (PTRACE_XFER_TYPE));

  int pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    {
      /* Dump up to the first four bytes, without reading past the
	 caller's buffer when LEN is smaller than that.  */
      unsigned int val = 0;
      memcpy (&val, myaddr, len < 4 ? len : 4);
      fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
	       val, (long) memaddr);
    }

  /* Fill start and end extra bytes of buffer with existing memory data.  */

  errno = 0;
  /* Coerce the 3rd arg to a uintptr_t first to avoid a potential gcc
     warning about coercing an 8 byte integer to a 4 byte pointer.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
		      (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
  if (errno)
    return errno;

  if (count > 1)
    {
      errno = 0;
      buffer[count - 1]
	= ptrace (PTRACE_PEEKTEXT, pid,
		  /* Coerce to a uintptr_t first to avoid a potential gcc
		     warning about coercing an 8 byte integer to a 4 byte
		     pointer.  */
		  (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
						  * sizeof (PTRACE_XFER_TYPE)),
		  0);
      if (errno)
	return errno;
    }

  /* Copy data to be written over corresponding part of buffer.  */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  myaddr, len);

  /* Write the entire buffer.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, pid,
	      /* Coerce to a uintptr_t first to avoid a potential gcc
		 warning about coercing an 8 byte integer to a 4 byte
		 pointer.  */
	      (PTRACE_ARG3_TYPE) (uintptr_t) addr,
	      (PTRACE_ARG4_TYPE) buffer[i]);
      if (errno)
	return errno;
    }

  return 0;
}
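
/* Illustrative sketch (compiled out; not part of gdbserver): the
   minimal read-modify-write that the function above performs when the
   write is smaller than one longword.  The containing word is fetched
   with PTRACE_PEEKTEXT, patched in place, and stored back with
   PTRACE_POKETEXT so the surrounding bytes are preserved.  */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <string.h>
#include <errno.h>

static int
poke_byte (pid_t pid, unsigned long addr, unsigned char value)
{
  unsigned long word_addr = addr & ~(sizeof (long) - 1);
  long word;

  errno = 0;
  word = ptrace (PTRACE_PEEKTEXT, pid, (void *) word_addr, 0);
  if (errno != 0)
    return errno;
  memcpy ((char *) &word + (addr - word_addr), &value, 1);
  if (ptrace (PTRACE_POKETEXT, pid, (void *) word_addr,
	      (void *) word) == -1)
    return errno;
  return 0;
}
#endif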

/* Non-zero if the kernel supports PTRACE_O_TRACEFORK.  */
static int linux_supports_tracefork_flag;

static void
linux_enable_event_reporting (int pid)
{
  if (!linux_supports_tracefork_flag)
    return;

  ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
}
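
/* Illustrative sketch (compiled out; not part of gdbserver): once
   PTRACE_O_TRACECLONE is set as above, a clone by the tracee stops it
   with SIGTRAP and the event number encoded in the high bits of the
   wait status; the new thread's ID is then fetched with
   PTRACE_GETEVENTMSG.  Depending on the libc, PTRACE_EVENT_CLONE may
   only be declared in <linux/ptrace.h>.  */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>

static long
get_cloned_lwp (pid_t pid, int status)
{
  unsigned long new_lwp = 0;

  if (WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && (status >> 16) == PTRACE_EVENT_CLONE)
    ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_lwp);

  return (long) new_lwp;
}
#endif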

/* Helper functions for linux_test_for_tracefork, called via clone ().  */

static int
linux_tracefork_grandchild (void *arg)
{
  _exit (0);
}

#define STACK_SIZE 4096

static int
linux_tracefork_child (void *arg)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  if (fork () == 0)
    linux_tracefork_grandchild (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else
  clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  _exit (0);
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.  Make
   sure that we can enable the option, and that it had the desired
   effect.  */

static void
linux_test_for_tracefork (void)
{
  int child_pid, ret, status;
  long second_pid;
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  char *stack = xmalloc (STACK_SIZE * 4);
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  linux_supports_tracefork_flag = 0;

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  child_pid = fork ();
  if (child_pid == 0)
    linux_tracefork_child (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  /* Use CLONE_VM instead of fork, to support uClinux (no MMU).  */
#ifdef __ia64__
  child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
			CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#else /* !__ia64__ */
  child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
		     CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#endif /* !__ia64__ */

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  if (child_pid == -1)
    perror_with_name ("clone");

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name ("waitpid");
  else if (ret != child_pid)
    error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
  if (! WIFSTOPPED (status))
    error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		(PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning ("linux_test_for_tracefork: failed to kill child");
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning ("linux_test_for_tracefork: failed to wait for killed child");
      else if (!WIFSIGNALED (status))
	warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
		 "killed child", status);

      return;
    }

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning ("linux_test_for_tracefork: failed to resume child");

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning ("linux_test_for_tracefork: failed to kill second child");
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning ("linux_test_for_tracefork: unexpected result from waitpid "
	     "(%d, status 0x%x)", ret, status);

  do
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	warning ("linux_test_for_tracefork: failed to kill child");
      my_waitpid (child_pid, &status, 0);
    }
  while (WIFSTOPPED (status));

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  free (stack);
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
}


static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing forks then it also supports tracing
     clones, and then we don't need to use the magic thread event breakpoint
     to learn about threads.  */
  thread_db_init (!linux_supports_tracefork_flag);
#endif
}

static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  if (!ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct lwp_info *lwp;
      int lwpid;

      lwp = get_thread_lwp (current_inferior);
      lwpid = lwpid_of (lwp);
      kill_lwp (lwpid, SIGINT);
    }
  else
    kill_lwp (signal_pid, SIGINT);
}

/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
   to debugger memory starting at MYADDR.  */

static int
linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
{
  char filename[PATH_MAX];
  int fd, n;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  if (offset != (CORE_ADDR) 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    n = -1;
  else
    n = read (fd, myaddr, len);

  close (fd);

  return n;
}
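
/* A compiled-out sketch (not part of gdbserver) of consuming the raw
   bytes returned above: /proc/PID/auxv is an array of { a_type,
   a_un.a_val } pairs terminated by AT_NULL, here scanned for one tag
   using the host's ElfW-sized layout.  That layout assumption only
   holds when the host and inferior word sizes match; gdbserver proper
   must honor the inferior's word size instead.  */
#if 0
#include <elf.h>
#include <link.h>

static unsigned long
auxv_lookup (const unsigned char *buf, int nbytes, unsigned long type)
{
  const ElfW(auxv_t) *aux = (const ElfW(auxv_t) *) buf;
  int n = nbytes / sizeof (*aux);
  int i;

  for (i = 0; i < n && aux[i].a_type != AT_NULL; i++)
    if (aux[i].a_type == type)
      return aux[i].a_un.a_val;
  return 0;
}
#endif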

/* These breakpoint and watchpoint related wrapper functions simply
   pass on the function call if the target has registered a
   corresponding function.  */

static int
linux_insert_point (char type, CORE_ADDR addr, int len)
{
  if (the_low_target.insert_point != NULL)
    return the_low_target.insert_point (type, addr, len);
  else
    /* Unsupported (see target.h).  */
    return 1;
}

static int
linux_remove_point (char type, CORE_ADDR addr, int len)
{
  if (the_low_target.remove_point != NULL)
    return the_low_target.remove_point (type, addr, len);
  else
    /* Unsupported (see target.h).  */
    return 1;
}

static int
linux_stopped_by_watchpoint (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);

  return lwp->stopped_by_watchpoint;
}

static CORE_ADDR
linux_stopped_data_address (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);

  return lwp->stopped_data_address;
}

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
#if defined(__mcoldfire__)
/* These should really be defined in the kernel's ptrace.h header.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long) PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long) PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long) PT_DATA_ADDR, 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
  return 0;
}
#endif

static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  */

static void
siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
{
  int done = 0;

  if (the_low_target.siginfo_fixup != NULL)
    done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);

  /* If there was no callback, or the callback didn't do anything,
     then just do a straight memcpy.  */
  if (!done)
    {
      if (direction == 1)
	memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
      else
	memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
    }
}

static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  struct siginfo siginfo;
  char inf_siginfo[sizeof (struct siginfo)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
	     readbuf != NULL ? "Reading" : "Writing",
	     pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
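
/* Illustrative sketch (compiled out; not part of gdbserver): the
   kernel half of the transfer above.  For a stopped tracee,
   PTRACE_GETSIGINFO yields the siginfo of the signal that caused the
   stop - for instance, the faulting address of a SIGSEGV.  */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <signal.h>
#include <stddef.h>

static void *
fault_address (pid_t pid)
{
  siginfo_t si;

  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &si) != 0)
    return NULL;
  return (si.si_signo == SIGSEGV) ? si.si_addr : NULL;
}
#endif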

/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
   it notifies us when children change state; and it is the handler
   for the sigsuspend in my_waitpid.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    {
      /* fprintf is not async-signal-safe, so call write directly.
	 If the write fails, there is nothing useful to do about it
	 here, so just ignore the error.  */
      if (write (2, "sigchld_handler\n",
		 sizeof ("sigchld_handler\n") - 1) < 0)
	/* Ignore.  */;
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}

static int
linux_supports_non_stop (void)
{
  return 1;
}

static int
linux_async (int enable)
{
  int previous = (linux_event_pipe[0] != -1);

  if (debug_threads)
    fprintf (stderr, "linux_async (%d), previous=%d\n",
	     enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    fatal ("creating event pipe failed.");

	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
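
/* Illustrative sketch (compiled out; not part of gdbserver) of the
   self-pipe pattern used above: the signal handler's only job is a
   single async-signal-safe write to a non-blocking pipe, and the
   event loop's select/poll on the read end turns the signal into an
   ordinary file-descriptor event.  */
#if 0
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>

static int event_pipe[2];

static void
mark_event (int signo)
{
  int old_errno = errno;

  /* A failed or short write just means the pipe is already marked.  */
  (void) write (event_pipe[1], "+", 1);
  errno = old_errno;
}

static int
make_event_pipe (void)
{
  if (pipe (event_pipe) != 0)
    return -1;
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);
  return event_pipe[0];		/* Hand this fd to the event loop.  */
}
#endif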

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}

static int
linux_supports_multi_process (void)
{
  return 1;
}

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
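
/* Illustrative sketch (compiled out; not part of gdbserver) of what
   HAVE_PERSONALITY enables: before exec'ing the inferior, ASLR is
   turned off for the child by OR-ing ADDR_NO_RANDOMIZE into its
   execution domain.  personality (0xffffffff) is the documented idiom
   for querying the current value, and the call returns the previous
   personality so it can be restored afterwards.  */
#if 0
#include <sys/personality.h>

static int
disable_aslr_for_child (void)
{
  int old = personality (0xffffffff);	/* Query current value.  */

  if (old == -1)
    return -1;
  return personality (old | ADDR_NO_RANDOMIZE);
}
#endif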

/* Enumerate spufs IDs for process PID.  */
static int
spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
{
  int pos = 0;
  int written = 0;
  char path[128];
  DIR *dir;
  struct dirent *entry;

  sprintf (path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      sprintf (path, "/proc/%ld/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      if (pos >= offset && pos + 4 <= offset + len)
	{
	  *(unsigned int *) (buf + pos - offset) = fd;
	  written += 4;
	}
      pos += 4;
    }

  closedir (dir);
  return written;
}

/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
   object type, using the /proc file system.  */
static int
linux_qxfer_spu (const char *annex, unsigned char *readbuf,
		 unsigned const char *writebuf,
		 CORE_ADDR offset, int len)
{
  long pid = lwpid_of (get_thread_lwp (current_inferior));
  char buf[128];
  int fd = 0;
  int ret = 0;

  if (!writebuf && !readbuf)
    return -1;

  if (!*annex)
    {
      if (!readbuf)
	return -1;
      else
	return spu_enumerate_spu_ids (pid, readbuf, offset, len);
    }

  sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
  fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
  if (fd <= 0)
    return -1;

  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return 0;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else
    ret = read (fd, readbuf, (size_t) len);

  close (fd);
  return ret;
}

#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif

static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (get_thread_lwp (current_inferior));
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#else
# define linux_read_loadmap NULL
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */

static void
linux_process_qsupported (const char *query)
{
  if (the_low_target.process_qsupported != NULL)
    the_low_target.process_qsupported (query);
}

static int
linux_supports_tracepoints (void)
{
  if (the_low_target.supports_tracepoints == NULL)
    return 0;

  return (*the_low_target.supports_tracepoints) ();
}

static CORE_ADDR
linux_read_pc (struct regcache *regcache)
{
  if (the_low_target.get_pc == NULL)
    return 0;

  return (*the_low_target.get_pc) (regcache);
}

static void
linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (the_low_target.set_pc != NULL);

  (*the_low_target.set_pc) (regcache, pc);
}

static int
linux_thread_stopped (struct thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}

/* This exposes stop-all-threads functionality to other modules.  */

static void
linux_pause_all (int freeze)
{
  stop_all_lwps (freeze, NULL);
}

/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

static void
linux_unpause_all (int unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}

static int
linux_prepare_to_access_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_pause_all (1);
  return 0;
}

static void
linux_done_accessing_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_unpause_all (1);
}

static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, trampoline, trampoline_size,
     jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end,
     err);
}

static struct emit_ops *
linux_emit_ops (void)
{
  if (the_low_target.emit_ops != NULL)
    return (*the_low_target.emit_ops) ();
  else
    return NULL;
}

static int
linux_get_min_fast_tracepoint_insn_len (void)
{
  return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
}

/* Extract &phdr and num_phdr in the inferior.  Return 0 on success.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}

/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      warning ("Unexpected missing PT_PHDR");
      return 0;
    }

  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}

/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return (CORE_ADDR) -1;

  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;

	  if (dyn->d_tag == DT_DEBUG)
	    return dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;

	  if (dyn->d_tag == DT_DEBUG)
	    return dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return (CORE_ADDR) -1;
}

/* Read one pointer from MEMADDR in the inferior.  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  *ptr = 0;
  return linux_read_memory (memaddr, (unsigned char *) ptr, ptr_size);
}

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
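
/* A compiled-out sketch (not part of gdbserver): on a glibc host
   whose word size matches the inferior's, the offset tables used
   below can be cross-checked against the real struct link_map from
   <link.h>.  The hard-wired constants exist because gdbserver must
   also handle an inferior whose layout differs from the host's; the
   assertions here assume the usual glibc field layout.  */
#if 0
#include <link.h>
#include <stddef.h>
#include <assert.h>

static void
check_link_map_offsets (const struct link_map_offsets *lmo)
{
  assert (lmo->l_addr_offset == offsetof (struct link_map, l_addr));
  assert (lmo->l_name_offset == offsetof (struct link_map, l_name));
  assert (lmo->l_ld_offset == offsetof (struct link_map, l_ld));
  assert (lmo->l_next_offset == offsetof (struct link_map, l_next));
  assert (lmo->l_prev_offset == offsetof (struct link_map, l_prev));
}
#endif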

/* Construct qXfer:libraries:read reply.  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->private;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;

  if (priv->r_debug == 0)
    priv->r_debug = get_r_debug (pid, is_elf64);

  if (priv->r_debug == (CORE_ADDR) -1 || priv->r_debug == 0)
    {
      document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
    }
  else
    {
      int allocated = 1024;
      char *p;
      const int ptr_size = is_elf64 ? 8 : 4;
      CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
      int r_version, header_done = 0;

      document = xmalloc (allocated);
      strcpy (document, "<library-list-svr4 version=\"1.0\"");
      p = document + strlen (document);

      r_version = 0;
      if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
			     (unsigned char *) &r_version,
			     sizeof (r_version)) != 0
	  || r_version != 1)
	{
	  warning ("unexpected r_debug version %d", r_version);
	  goto done;
	}

      if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
			&lm_addr, ptr_size) != 0)
	{
	  warning ("unable to read r_map from 0x%lx",
		   (long) priv->r_debug + lmo->r_map_offset);
	  goto done;
	}

      lm_prev = 0;
      while (read_one_ptr (lm_addr + lmo->l_name_offset,
			   &l_name, ptr_size) == 0
	     && read_one_ptr (lm_addr + lmo->l_addr_offset,
			      &l_addr, ptr_size) == 0
	     && read_one_ptr (lm_addr + lmo->l_ld_offset,
			      &l_ld, ptr_size) == 0
	     && read_one_ptr (lm_addr + lmo->l_prev_offset,
			      &l_prev, ptr_size) == 0
	     && read_one_ptr (lm_addr + lmo->l_next_offset,
			      &l_next, ptr_size) == 0)
	{
	  unsigned char libname[PATH_MAX];

	  if (lm_prev != l_prev)
	    {
	      warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		       (long) lm_prev, (long) l_prev);
	      break;
	    }

	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      /* 6x the size for xml_escape_text below.  Use a name
		 distinct from the LEN parameter to avoid shadowing
		 it.  */
	      size_t name_len = 6 * strlen ((char *) libname);
	      char *name;

	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  *p++ = '>';
		  header_done = 1;
		}

	      while (allocated < p - document + name_len + 200)
		{
		  /* Expand to guarantee sufficient storage.  */
		  uintptr_t doc_len = p - document;

		  document = xrealloc (document, 2 * allocated);
		  allocated *= 2;
		  p = document + doc_len;
		}

	      name = xml_escape_text ((char *) libname);
	      p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
			    "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			    name, (unsigned long) lm_addr,
			    (unsigned long) l_addr, (unsigned long) l_ld);
	      free (name);
	    }
	  else if (lm_prev == 0)
	    {
	      sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
	      p = p + strlen (p);
	    }

	  if (l_next == 0)
	    break;

	  lm_prev = lm_addr;
	  lm_addr = l_next;
	}
    done:
      strcpy (p, "</library-list-svr4>");
    }

  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
};

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
#endif
}

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}