/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifndef PTRACE_GETSIGINFO
# define PTRACE_GETSIGINFO 0x4202
# define PTRACE_SETSIGINFO 0x4203
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the normal
   values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
#define PTRACE_O_TRACEVFORK 0x00000004
#define PTRACE_O_TRACECLONE 0x00000008
#define PTRACE_O_TRACEEXEC 0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT 0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6

#endif /* PTRACE_EVENT_FORK */

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   in some contexts.  */
#ifndef __WALL
#define __WALL 0x40000000 /* Wait for any child.  */
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some other
   process will presumably claim each of these as forked children
   momentarily.  */

struct inferior_list stopped_pids;

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int linux_core_of_thread (ptid_t ptid);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

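/* The type ptrace expects for its 3rd and 4th arguments, and the
   unit of data transferred by the ptrace peek/poke requests.  */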
#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);
  if (readlink (name1, name2, MAXPATHLEN) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
          && header->e_ident[EI_MAG1] == ELFMAG1
          && header->e_ident[EI_MAG2] == ELFMAG2
          && header->e_ident[EI_MAG3] == ELFMAG3
          && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return 1 if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}

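/* Remove LWP's thread from the thread list, unlink LWP from
   ALL_LWPS, and free its storage.  */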
static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

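/* Allocate and zero a new lwp_info for PTID, give the low target a
   chance to attach per-thread private data, and link the new LWP
   into ALL_LWPS.  */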
static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }
      else
        /* If we fail to attach to a process, report an error.  */
        error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
               strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might not have been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.
        ??? If the process already has several threads we leave the other
        threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

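/* Attach to an additional LWP of a process we are already
   debugging.  */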
void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

int
linux_attach (unsigned long pid)
{
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  return 0;
}

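/* A process ID paired with a running count of its threads, used by
   second_thread_of_pid_p below.  */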
struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

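/* Return non-zero if THREAD is the only remaining thread of its
   process, i.e. if no second thread with the same pid is found on
   ALL_THREADS.  */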
static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill the inferior lwp.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

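/* Kill process PID and all of its LWPs.  Return -1 if PID is not a
   process we know about, 0 otherwise.  */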
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  struct thread_info *thread;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));
  thread = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
             lwpid_of (lwp), pid);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

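/* find_inferior callback to detach a single LWP.  ARGS points to the
   pid of the process being detached; LWPs of other processes are
   skipped.  */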
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

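/* Detach from process PID, first stopping and stabilizing all of its
   LWPs.  Return -1 if PID is not a process we know about, 0
   otherwise.  */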
static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

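/* Forget everything about process PROCESS: remove its LWPs, let
   thread_db clean up, and free its private data.  */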
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

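/* Block until process PID reports an exit status, or until waitpid
   fails with ECHILD.  */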
static void
linux_join (int pid)
{
  int status, ret;
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

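/* find_inferior callback: return non-zero if ENTRY's LWP ID matches
   the ptid in DATA, falling back to the ptid's pid when it carries
   no LWP field.  */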
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

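/* Return the lwp_info of the LWP named by PTID, or NULL if it is not
   on the list.  */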
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}

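/* Wait for an event from the LWP(s) matching PTID (from any child if
   PTID is minus_one_ptid).  Store the raw wait status in *WSTATP and
   return the lwp_info of the LWP that reported the event, or NULL if
   OPTIONS contains WNOHANG and no event was pending.  */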
static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;                   /* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);  /* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* We resume in the caller because we want to be able to pass
   lwp->status_pending as WSTAT, and we need to clear status_pending_p
   before resuming; otherwise, linux_resume_one_lwp refuses to
   resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && in_process_agent_loaded ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
                 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
                         lwpid_of (lwp));

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
                }

              regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                fprintf (stderr,
                         "Cancelling fast exit-jump-pad: removing bkpt. "
                         "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);
              cancel_breakpoints ();

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
             lwpid_of (lwp));
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        fprintf (stderr,
                 " Already queued %d\n",
                 sig->signal);

      fprintf (stderr, " (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        {
          if (sig->signal == WSTOPSIG (*wstat))
            {
              if (debug_threads)
                fprintf (stderr,
                         "Not requeuing already queued non-RT signal %d"
                         " for LWP %ld\n",
                         sig->signal,
                         lwpid_of (lwp));
              return;
            }
        }
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
        p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
        ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
        fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
                 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
        {
          struct pending_signals *sig;

          for (sig = lwp->pending_signals_to_report;
               sig != NULL;
               sig = sig->prev)
            fprintf (stderr,
                     " Still queued %d\n",
                     sig->signal);

          fprintf (stderr, " (no more queued signals)\n");
        }

      return 1;
    }

  return 0;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: Push back breakpoint for %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (current_inferior, 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: No breakpoint found at %s for [%s]\n",
                 paddress (lwp->stop_pc),
                 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      event_child = (struct lwp_info *)
        find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
        fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (!stopping_threads
          && requested_child->status_pending_p
          && requested_child->collecting_fast_tracepoint)
        {
          enqueue_one_deferred_signal (requested_child,
                                       &requested_child->status_pending);
          requested_child->status_pending_p = 0;
          requested_child->status_pending = 0;
          linux_resume_one_lwp (requested_child, 0, 0, NULL);
        }

      if (requested_child->suspended
          && requested_child->status_pending_p)
        fatal ("requesting an event out of a suspended child?");

      if (requested_child->status_pending_p)
        event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
                 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
        {
          if (debug_threads)
            fprintf (stderr, "WNOHANG set, no event found\n");
          return 0;
        }

      if (event_child == NULL)
        error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
        {
          if (debug_threads)
            fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

          /* If the last thread is exiting, just return.  */
          if (last_thread_of_process_p (current_inferior))
            {
              if (debug_threads)
                fprintf (stderr, "LWP %ld is last lwp of process\n",
                         lwpid_of (event_child));
              return lwpid_of (event_child);
            }

          if (!non_stop)
            {
              current_inferior = (struct thread_info *) all_threads.head;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now %ld\n",
                         lwpid_of (get_thread_lwp (current_inferior)));
            }
          else
            {
              current_inferior = NULL;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now <NULL>\n");
            }

          /* If we were waiting for this particular child to do something...
             well, it did something.  */
          if (requested_child != NULL)
            {
              int lwpid = lwpid_of (event_child);

              /* Cancel the step-over operation --- the thread that
                 started it is gone.  */
              if (finish_step_over (event_child))
                unstop_all_lwps (1, event_child);
              delete_lwp (event_child);
              return lwpid;
            }

          delete_lwp (event_child);

          /* Wait for a more interesting event.  */
          continue;
        }

      if (event_child->must_set_ptrace_flags)
        {
          linux_enable_event_reporting (lwpid_of (event_child));
          event_child->must_set_ptrace_flags = 0;
        }

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
          && *wstat >> 16 != 0)
        {
          handle_extended_wait (event_child, *wstat);
          continue;
        }

      if (WIFSTOPPED (*wstat)
          && WSTOPSIG (*wstat) == SIGSTOP
          && event_child->stop_expected)
        {
          int should_stop;

          if (debug_threads)
            fprintf (stderr, "Expected stop.\n");
          event_child->stop_expected = 0;

          should_stop = (current_inferior->last_resume_kind == resume_stop
                         || stopping_threads);

          if (!should_stop)
            {
              linux_resume_one_lwp (event_child,
                                    event_child->stepping, 0, NULL);
              continue;
            }
        }

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}

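/* Like linux_wait_for_event_1, but if PTID names a whole process (a
   tgid), wait for any child and leave events from other processes
   pending, to be reported later.  */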
static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  ptid_t wait_ptid;

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
         with waitpid, so instead, we wait for any child, and leave
         children we're not interested in right now with a pending
         status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  while (1)
    {
      int event_pid;

      event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);

      if (event_pid > 0
          && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
        {
          struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));

          if (! WIFSTOPPED (*wstat))
            mark_lwp_dead (event_child, *wstat);
          else
            {
              event_child->status_pending_p = 1;
              event_child->status_pending = *wstat;
            }
        }
      else
        return event_pid;
    }
}


/* Count the LWPs that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);

  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind == resume_step
      && lp->status_pending_p)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event that should be
   reported to GDB.  */

static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !lp->stepping
      && !lp->stopped_by_watchpoint
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status_pending_p = 0;

  return 0;
}

static void
linux_cancel_breakpoints (void)
{
  find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp
    = (struct lwp_info *) find_inferior (&all_lwps,
                                         select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_threads)
        fprintf (stderr,
                 "SEL: Select single-step %s\n",
                 target_pid_to_str (ptid_of (event_lp)));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      find_inferior (&all_lwps, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
        ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
        fprintf (stderr,
                 "SEL: Found %d SIGTRAP events, selecting #%d\n",
                 num_events, random_selector);

      event_lp = (struct lwp_info *) find_inferior (&all_lwps,
                                                    select_event_lwp_callback,
                                                    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}

/* Decrement the suspend count of an LWP.  */

static int
unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return 0;

  lwp->suspended--;

  gdb_assert (lwp->suspended >= 0);
  return 0;
}

/* Decrement the suspend count of all LWPs, except EXCEPT, if
   non-NULL.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  find_inferior (&all_lwps, unsuspend_one_lwp, except);
}

static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
                                       void *data);
static int lwp_running (struct inferior_list_entry *entry, void *data);
static ptid_t linux_wait_1 (ptid_t ptid,
                            struct target_waitstatus *ourstatus,
                            int target_options);

/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in one of its callees).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even be midway through
   memcpy'ing to the buffer, which would mean that when resumed, it
   would clobber the
   trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

static void
linux_stabilize_threads (void)
{
  struct thread_info *save_inferior;
  struct lwp_info *lwp_stuck;

  lwp_stuck
    = (struct lwp_info *) find_inferior (&all_lwps,
                                         stuck_in_jump_pad_callback, NULL);
  if (lwp_stuck != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
                 lwpid_of (lwp_stuck));
      return;
    }

  save_inferior = current_inferior;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);

  /* Loop until all are stopped out of the jump pads.  */
  while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      ptid_t ptid;
      int wstat;

1938 /* Note that we go through the full wait event loop. While
1939 moving threads out of the jump pad, we need to be able to step
1940 over internal breakpoints and such. */
1941 ptid = linux_wait_1 (minus_one_ptid, &ourstatus, 0);
1942
1943 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
1944 {
1945 lwp = get_thread_lwp (current_inferior);
1946
1947 /* Lock it. */
1948 lwp->suspended++;
1949
1950 if (ourstatus.value.sig != TARGET_SIGNAL_0
1951 || current_inferior->last_resume_kind == resume_stop)
1952 {
1953 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
1954 enqueue_one_deferred_signal (lwp, &wstat);
1955 }
1956 }
1957 }
1958
1959 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
1960
1961 stabilizing_threads = 0;
1962
1963 current_inferior = save_inferior;
1964
1965 if (debug_threads)
1966 {
1967 lwp_stuck
1968 = (struct lwp_info *) find_inferior (&all_lwps,
1969 stuck_in_jump_pad_callback, NULL);
1970 if (lwp_stuck != NULL)
1971 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
1972 lwpid_of (lwp_stuck));
1973 }
1974 }
1975
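/* Illustration only: stripped of the tracepoint details, the loop
   above is a generic "kick everyone, then drain events until nothing
   runs" pattern.  A hedged sketch with hypothetical helpers
   (kick_all, any_lwp_running, wait_one_event are not real functions
   in this file):  */
#if 0
static void
example_stabilize (void)
{
  kick_all ();			/* cf. move_out_of_jump_pad_callback */
  while (any_lwp_running ())	/* cf. lwp_running */
    wait_one_event ();		/* cf. linux_wait_1 */
}
#endif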
1976 /* Wait for process, returns status. */
1977
1978 static ptid_t
1979 linux_wait_1 (ptid_t ptid,
1980 struct target_waitstatus *ourstatus, int target_options)
1981 {
1982 int w;
1983 struct lwp_info *event_child;
1984 int options;
1985 int pid;
1986 int step_over_finished;
1987 int bp_explains_trap;
1988 int maybe_internal_trap;
1989 int report_to_gdb;
1990 int trace_event;
1991
1992 /* Translate generic target options into linux options. */
1993 options = __WALL;
1994 if (target_options & TARGET_WNOHANG)
1995 options |= WNOHANG;
1996
1997 retry:
1998 bp_explains_trap = 0;
1999 trace_event = 0;
2000 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2001
2002 /* If we were only supposed to resume one thread, only wait for
2003 that thread - if it's still alive. If it died, however - which
2004 can happen if we're coming from the thread death case below -
2005 then we need to make sure we restart the other threads. We could
2006 pick a thread at random or restart all; restarting all is less
2007 arbitrary. */
2008 if (!non_stop
2009 && !ptid_equal (cont_thread, null_ptid)
2010 && !ptid_equal (cont_thread, minus_one_ptid))
2011 {
2012 struct thread_info *thread;
2013
2014 thread = (struct thread_info *) find_inferior_id (&all_threads,
2015 cont_thread);
2016
2017 /* No stepping, no signal - unless one is pending already, of course. */
2018 if (thread == NULL)
2019 {
2020 struct thread_resume resume_info;
2021 resume_info.thread = minus_one_ptid;
2022 resume_info.kind = resume_continue;
2023 resume_info.sig = 0;
2024 linux_resume (&resume_info, 1);
2025 }
2026 else
2027 ptid = cont_thread;
2028 }
2029
2030 if (ptid_equal (step_over_bkpt, null_ptid))
2031 pid = linux_wait_for_event (ptid, &w, options);
2032 else
2033 {
2034 if (debug_threads)
2035 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2036 target_pid_to_str (step_over_bkpt));
2037 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2038 }
2039
2040 if (pid == 0) /* only if TARGET_WNOHANG */
2041 return null_ptid;
2042
2043 event_child = get_thread_lwp (current_inferior);
2044
2045 /* If we are waiting for a particular child, and it exited,
2046 linux_wait_for_event will return its exit status. Similarly if
2047 the last child exited. If this is not the last child, however,
2048 do not report it as exited until there is a 'thread exited' response
2049 available in the remote protocol. Instead, just wait for another event.
2050 This should be safe, because if the thread crashed we will already
2051 have reported the termination signal to GDB; that should stop any
2052 in-progress stepping operations, etc.
2053
2054 Report the exit status of the last thread to exit. This matches
2055 LinuxThreads' behavior. */
2056
2057 if (last_thread_of_process_p (current_inferior))
2058 {
2059 if (WIFEXITED (w) || WIFSIGNALED (w))
2060 {
2061 if (WIFEXITED (w))
2062 {
2063 ourstatus->kind = TARGET_WAITKIND_EXITED;
2064 ourstatus->value.integer = WEXITSTATUS (w);
2065
2066 if (debug_threads)
2067 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
2068 }
2069 else
2070 {
2071 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2072 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2073
2074 if (debug_threads)
2075 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
2076
2077 }
2078
2079 return ptid_of (event_child);
2080 }
2081 }
2082 else
2083 {
2084 if (!WIFSTOPPED (w))
2085 goto retry;
2086 }
2087
2088 /* If this event was not handled before, and is not a SIGTRAP, we
2089 report it. SIGILL and SIGSEGV are also treated as traps in case
2090 a breakpoint is inserted at the current PC. If this target does
2091 not support internal breakpoints at all, we also report the
2092 SIGTRAP without further processing; it's of no concern to us. */
2093 maybe_internal_trap
2094 = (supports_breakpoints ()
2095 && (WSTOPSIG (w) == SIGTRAP
2096 || ((WSTOPSIG (w) == SIGILL
2097 || WSTOPSIG (w) == SIGSEGV)
2098 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2099
2100 if (maybe_internal_trap)
2101 {
2102 /* Handle anything that requires bookkeeping before deciding to
2103 report the event or continue waiting. */
2104
2105 /* First check if we can explain the SIGTRAP with an internal
2106 breakpoint, or if we should possibly report the event to GDB.
2107 Do this before anything that may remove or insert a
2108 breakpoint. */
2109 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2110
2111 /* We have a SIGTRAP, possibly a step-over dance has just
2112 finished. If so, tweak the state machine accordingly,
2113 reinsert breakpoints and delete any reinsert (software
2114 single-step) breakpoints. */
2115 step_over_finished = finish_step_over (event_child);
2116
2117 /* Now invoke the callbacks of any internal breakpoints there. */
2118 check_breakpoints (event_child->stop_pc);
2119
2120 /* Handle tracepoint data collecting. This may overflow the
2121 trace buffer, and cause a tracing stop, removing
2122 breakpoints. */
2123 trace_event = handle_tracepoints (event_child);
2124
2125 if (bp_explains_trap)
2126 {
2127 /* If we stepped or ran into an internal breakpoint, we've
2128 already handled it. So next time we resume (from this
2129 PC), we should step over it. */
2130 if (debug_threads)
2131 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2132
2133 if (breakpoint_here (event_child->stop_pc))
2134 event_child->need_step_over = 1;
2135 }
2136 }
2137 else
2138 {
2139 /* We have some other signal, possibly a step-over dance was in
2140 progress, and it should be cancelled too. */
2141 step_over_finished = finish_step_over (event_child);
2142 }
2143
2144 /* We have all the data we need. Either report the event to GDB, or
2145 resume threads and keep waiting for more. */
2146
2147 /* If we're collecting a fast tracepoint, finish the collection and
2148 move out of the jump pad before delivering a signal. See
2149 linux_stabilize_threads. */
2150
2151 if (WIFSTOPPED (w)
2152 && WSTOPSIG (w) != SIGTRAP
2153 && supports_fast_tracepoints ()
2154 && in_process_agent_loaded ())
2155 {
2156 if (debug_threads)
2157 fprintf (stderr,
2158 "Got signal %d for LWP %ld. Check if we need "
2159 "to defer or adjust it.\n",
2160 WSTOPSIG (w), lwpid_of (event_child));
2161
2162 /* Allow debugging the jump pad itself. */
2163 if (current_inferior->last_resume_kind != resume_step
2164 && maybe_move_out_of_jump_pad (event_child, &w))
2165 {
2166 enqueue_one_deferred_signal (event_child, &w);
2167
2168 if (debug_threads)
2169 fprintf (stderr,
2170 "Signal %d for LWP %ld deferred (in jump pad)\n",
2171 WSTOPSIG (w), lwpid_of (event_child));
2172
2173 linux_resume_one_lwp (event_child, 0, 0, NULL);
2174 goto retry;
2175 }
2176 }
2177
2178 if (event_child->collecting_fast_tracepoint)
2179 {
2180 if (debug_threads)
2181 fprintf (stderr, "\
2182 LWP %ld was trying to move out of the jump pad (%d). \
2183 Check if we're already there.\n",
2184 lwpid_of (event_child),
2185 event_child->collecting_fast_tracepoint);
2186
2187 trace_event = 1;
2188
2189 event_child->collecting_fast_tracepoint
2190 = linux_fast_tracepoint_collecting (event_child, NULL);
2191
2192 if (event_child->collecting_fast_tracepoint != 1)
2193 {
2194 /* No longer need this breakpoint. */
2195 if (event_child->exit_jump_pad_bkpt != NULL)
2196 {
2197 if (debug_threads)
2198 fprintf (stderr,
2199 "No longer need exit-jump-pad bkpt; removing it."
2200 "stopping all threads momentarily.\n");
2201
2202 /* Other running threads could hit this breakpoint.
2203 We don't handle moribund locations like GDB does;
2204 instead we always pause all threads when removing
2205 breakpoints, so that any step-over or
2206 decr_pc_after_break adjustment is always taken
2207 care of while the breakpoint is still
2208 inserted. */
2209 stop_all_lwps (1, event_child);
2210 cancel_breakpoints ();
2211
2212 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2213 event_child->exit_jump_pad_bkpt = NULL;
2214
2215 unstop_all_lwps (1, event_child);
2216
2217 gdb_assert (event_child->suspended >= 0);
2218 }
2219 }
2220
2221 if (event_child->collecting_fast_tracepoint == 0)
2222 {
2223 if (debug_threads)
2224 fprintf (stderr,
2225 "fast tracepoint finished "
2226 "collecting successfully.\n");
2227
2228 /* We may have a deferred signal to report. */
2229 if (dequeue_one_deferred_signal (event_child, &w))
2230 {
2231 if (debug_threads)
2232 fprintf (stderr, "dequeued one signal.\n");
2233 }
2234 else
2235 {
2236 if (debug_threads)
2237 fprintf (stderr, "no deferred signals.\n");
2238
2239 if (stabilizing_threads)
2240 {
2241 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2242 ourstatus->value.sig = TARGET_SIGNAL_0;
2243 return ptid_of (event_child);
2244 }
2245 }
2246 }
2247 }
2248
2249 /* Check whether GDB would be interested in this event. */
2250
2251 /* If GDB is not interested in this signal, don't stop other
2252 threads, and don't report it to GDB. Just resume the inferior
2253 right away. We do this for threading-related signals as well as
2254 any that GDB specifically requested we ignore. But never ignore
2255 SIGSTOP if we sent it ourselves, and do not ignore signals when
2256 stepping - they may require special handling to skip the signal
2257 handler. */
2258 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2259 thread library? */
2260 if (WIFSTOPPED (w)
2261 && current_inferior->last_resume_kind != resume_step
2262 && (
2263 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2264 (current_process ()->private->thread_db != NULL
2265 && (WSTOPSIG (w) == __SIGRTMIN
2266 || WSTOPSIG (w) == __SIGRTMIN + 1))
2267 ||
2268 #endif
2269 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2270 && !(WSTOPSIG (w) == SIGSTOP
2271 && current_inferior->last_resume_kind == resume_stop))))
2272 {
2273 siginfo_t info, *info_p;
2274
2275 if (debug_threads)
2276 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2277 WSTOPSIG (w), lwpid_of (event_child));
2278
2279 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2280 info_p = &info;
2281 else
2282 info_p = NULL;
2283 linux_resume_one_lwp (event_child, event_child->stepping,
2284 WSTOPSIG (w), info_p);
2285 goto retry;
2286 }
2287
2288 /* If GDB wanted this thread to single step, we always want to
2289 report the SIGTRAP, and let GDB handle it. Watchpoints should
2290 always be reported. So should signals we can't explain. A
2291 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2292 may not support Z0 breakpoints. If we do, we'll be able to handle
2293 GDB breakpoints on top of internal breakpoints, by handling the
2294 internal breakpoint and still reporting the event to GDB. If we
2295 don't, we're out of luck; GDB won't see the breakpoint hit. */
2296 report_to_gdb = (!maybe_internal_trap
2297 || current_inferior->last_resume_kind == resume_step
2298 || event_child->stopped_by_watchpoint
2299 || (!step_over_finished && !bp_explains_trap && !trace_event)
2300 || gdb_breakpoint_here (event_child->stop_pc));
2301
2302 /* We found no reason GDB would want us to stop. We either hit one
2303 of our own breakpoints, or finished an internal step GDB
2304 shouldn't know about. */
2305 if (!report_to_gdb)
2306 {
2307 if (debug_threads)
2308 {
2309 if (bp_explains_trap)
2310 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2311 if (step_over_finished)
2312 fprintf (stderr, "Step-over finished.\n");
2313 if (trace_event)
2314 fprintf (stderr, "Tracepoint event.\n");
2315 }
2316
2317 /* We're not reporting this breakpoint to GDB, so apply the
2318 decr_pc_after_break adjustment to the inferior's regcache
2319 ourselves. */
2320
2321 if (the_low_target.set_pc != NULL)
2322 {
2323 struct regcache *regcache
2324 = get_thread_regcache (get_lwp_thread (event_child), 1);
2325 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2326 }
2327
2328 /* We may have finished stepping over a breakpoint. If so,
2329 we've stopped and suspended all LWPs momentarily except the
2330 stepping one. This is where we resume them all again. We're
2331 going to keep waiting, so use proceed, which handles stepping
2332 over the next breakpoint. */
2333 if (debug_threads)
2334 fprintf (stderr, "proceeding all threads.\n");
2335
2336 if (step_over_finished)
2337 unsuspend_all_lwps (event_child);
2338
2339 proceed_all_lwps ();
2340 goto retry;
2341 }
2342
2343 if (debug_threads)
2344 {
2345 if (current_inferior->last_resume_kind == resume_step)
2346 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2347 if (event_child->stopped_by_watchpoint)
2348 fprintf (stderr, "Stopped by watchpoint.\n");
2349 if (gdb_breakpoint_here (event_child->stop_pc))
2350 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2351 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2353 }
2354
2355 /* Alright, we're going to report a stop. */
2356
2357 if (!non_stop && !stabilizing_threads)
2358 {
2359 /* In all-stop, stop all threads. */
2360 stop_all_lwps (0, NULL);
2361
2362 /* If we're not waiting for a specific LWP, choose an event LWP
2363 from among those that have had events. Giving equal priority
2364 to all LWPs that have had events helps prevent
2365 starvation. */
2366 if (ptid_equal (ptid, minus_one_ptid))
2367 {
2368 event_child->status_pending_p = 1;
2369 event_child->status_pending = w;
2370
2371 select_event_lwp (&event_child);
2372
2373 event_child->status_pending_p = 0;
2374 w = event_child->status_pending;
2375 }
2376
2377 /* Now that we've selected our final event LWP, cancel any
2378 breakpoints in other LWPs that have hit a GDB breakpoint.
2379 See the comment in cancel_breakpoints_callback to find out
2380 why. */
2381 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2382
2383 /* Stabilize threads (move out of jump pads). */
2384 stabilize_threads ();
2385 }
2386 else
2387 {
2388 /* If we just finished a step-over, then all threads had been
2389 momentarily paused. In all-stop, that's fine, we want
2390 threads stopped by now anyway. In non-stop, we need to
2391 re-resume threads that GDB wanted to be running. */
2392 if (step_over_finished)
2393 unstop_all_lwps (1, event_child);
2394 }
2395
2396 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2397
2398 if (current_inferior->last_resume_kind == resume_stop
2399 && WSTOPSIG (w) == SIGSTOP)
2400 {
2401 /* GDB requested this thread to stop with vCont;t, and it
2402 stopped cleanly, so report it as stopped with SIG0. The use
2403 of SIGSTOP is an implementation detail. */
2404 ourstatus->value.sig = TARGET_SIGNAL_0;
2405 }
2406 else if (current_inferior->last_resume_kind == resume_stop
2407 && WSTOPSIG (w) != SIGSTOP)
2408 {
2409 /* GDB requested this thread to stop with vCont;t, but it
2410 stopped for some other reason. */
2411 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2412 }
2413 else
2414 {
2415 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2416 }
2417
2418 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2419
2420 if (debug_threads)
2421 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2422 target_pid_to_str (ptid_of (event_child)),
2423 ourstatus->kind,
2424 ourstatus->value.sig);
2425
2426 return ptid_of (event_child);
2427 }
2428
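/* Illustration only: linux_wait_1 leans on the standard wait-status
   macros from <sys/wait.h> (already included above).  A
   self-contained sketch of how a raw status word W decomposes (not
   part of the build):  */
#if 0
static void
example_decode_status (int w)
{
  if (WIFEXITED (w))
    fprintf (stderr, "exited, code %d\n", WEXITSTATUS (w));
  else if (WIFSIGNALED (w))
    fprintf (stderr, "killed by signal %d\n", WTERMSIG (w));
  else if (WIFSTOPPED (w))
    fprintf (stderr, "stopped by signal %d\n", WSTOPSIG (w));
}
#endif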
2429 /* Get rid of any pending event in the pipe. */
2430 static void
2431 async_file_flush (void)
2432 {
2433 int ret;
2434 char buf;
2435
2436 do
2437 ret = read (linux_event_pipe[0], &buf, 1);
2438 while (ret >= 0 || (ret == -1 && errno == EINTR));
2439 }
2440
2441 /* Put something in the pipe, so the event loop wakes up. */
2442 static void
2443 async_file_mark (void)
2444 {
2445 int ret;
2446
2447 async_file_flush ();
2448
2449 do
2450 ret = write (linux_event_pipe[1], "+", 1);
2451 while (ret == 0 || (ret == -1 && errno == EINTR));
2452
2453 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2454 be awakened anyway. */
2455 }
2456
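/* Illustration only: the two helpers above implement the classic
   self-pipe trick.  A hedged setup sketch, assuming linux_event_pipe
   was created non-blocking roughly like this (not part of the
   build):  */
#if 0
static int
example_setup_event_pipe (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;

  /* Non-blocking on both ends: flushing reads until EAGAIN, and
     marking never blocks even if the pipe is already full.  */
  fcntl (fds[0], F_SETFL, O_NONBLOCK);
  fcntl (fds[1], F_SETFL, O_NONBLOCK);
  return 0;
}
#endif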
2457 static ptid_t
2458 linux_wait (ptid_t ptid,
2459 struct target_waitstatus *ourstatus, int target_options)
2460 {
2461 ptid_t event_ptid;
2462
2463 if (debug_threads)
2464 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2465
2466 /* Flush the async file first. */
2467 if (target_is_async_p ())
2468 async_file_flush ();
2469
2470 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2471
2472 /* If at least one stop was reported, there may be more. A single
2473 SIGCHLD can signal more than one child stop. */
2474 if (target_is_async_p ()
2475 && (target_options & TARGET_WNOHANG) != 0
2476 && !ptid_equal (event_ptid, null_ptid))
2477 async_file_mark ();
2478
2479 return event_ptid;
2480 }
2481
2482 /* Send a signal to an LWP. */
2483
2484 static int
2485 kill_lwp (unsigned long lwpid, int signo)
2486 {
2487 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2488 fails, then we are not using nptl threads and we should be using kill. */
2489
2490 #ifdef __NR_tkill
2491 {
2492 static int tkill_failed;
2493
2494 if (!tkill_failed)
2495 {
2496 int ret;
2497
2498 errno = 0;
2499 ret = syscall (__NR_tkill, lwpid, signo);
2500 if (errno != ENOSYS)
2501 return ret;
2502 tkill_failed = 1;
2503 }
2504 }
2505 #endif
2506
2507 return kill (lwpid, signo);
2508 }
2509
2510 void
2511 linux_stop_lwp (struct lwp_info *lwp)
2512 {
2513 send_sigstop (lwp);
2514 }
2515
2516 static void
2517 send_sigstop (struct lwp_info *lwp)
2518 {
2519 int pid;
2520
2521 pid = lwpid_of (lwp);
2522
2523 /* If we already have a pending stop signal for this process, don't
2524 send another. */
2525 if (lwp->stop_expected)
2526 {
2527 if (debug_threads)
2528 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2529
2530 return;
2531 }
2532
2533 if (debug_threads)
2534 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2535
2536 lwp->stop_expected = 1;
2537 kill_lwp (pid, SIGSTOP);
2538 }
2539
2540 static int
2541 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2542 {
2543 struct lwp_info *lwp = (struct lwp_info *) entry;
2544
2545 /* Ignore EXCEPT. */
2546 if (lwp == except)
2547 return 0;
2548
2549 if (lwp->stopped)
2550 return 0;
2551
2552 send_sigstop (lwp);
2553 return 0;
2554 }
2555
2556 /* Increment the suspend count of an LWP, and stop it, if not stopped
2557 yet. */
2558 static int
2559 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2560 void *except)
2561 {
2562 struct lwp_info *lwp = (struct lwp_info *) entry;
2563
2564 /* Ignore EXCEPT. */
2565 if (lwp == except)
2566 return 0;
2567
2568 lwp->suspended++;
2569
2570 return send_sigstop_callback (entry, except);
2571 }
2572
2573 static void
2574 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2575 {
2576 /* It's dead, really. */
2577 lwp->dead = 1;
2578
2579 /* Store the exit status for later. */
2580 lwp->status_pending_p = 1;
2581 lwp->status_pending = wstat;
2582
2583 /* Prevent trying to stop it. */
2584 lwp->stopped = 1;
2585
2586 /* No further stops are expected from a dead lwp. */
2587 lwp->stop_expected = 0;
2588 }
2589
2590 static void
2591 wait_for_sigstop (struct inferior_list_entry *entry)
2592 {
2593 struct lwp_info *lwp = (struct lwp_info *) entry;
2594 struct thread_info *saved_inferior;
2595 int wstat;
2596 ptid_t saved_tid;
2597 ptid_t ptid;
2598 int pid;
2599
2600 if (lwp->stopped)
2601 {
2602 if (debug_threads)
2603 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2604 lwpid_of (lwp));
2605 return;
2606 }
2607
2608 saved_inferior = current_inferior;
2609 if (saved_inferior != NULL)
2610 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2611 else
2612 saved_tid = null_ptid; /* avoid bogus unused warning */
2613
2614 ptid = lwp->head.id;
2615
2616 if (debug_threads)
2617 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2618
2619 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2620
2621 /* If we stopped with a non-SIGSTOP signal, save it for later
2622 and record the pending SIGSTOP. If the process exited, just
2623 return. */
2624 if (WIFSTOPPED (wstat))
2625 {
2626 if (debug_threads)
2627 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2628 lwpid_of (lwp), WSTOPSIG (wstat));
2629
2630 if (WSTOPSIG (wstat) != SIGSTOP)
2631 {
2632 if (debug_threads)
2633 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2634 lwpid_of (lwp), wstat);
2635
2636 lwp->status_pending_p = 1;
2637 lwp->status_pending = wstat;
2638 }
2639 }
2640 else
2641 {
2642 if (debug_threads)
2643 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2644
2645 lwp = find_lwp_pid (pid_to_ptid (pid));
2646 if (lwp)
2647 {
2648 /* Leave this status pending for the next time we're able to
2649 report it. In the meantime, we'll report this lwp as
2650 dead to GDB, so GDB doesn't try to read registers and
2651 memory from it. This can only happen if this was the
2652 last thread of the process; otherwise, PID is removed
2653 from the thread tables before linux_wait_for_event
2654 returns. */
2655 mark_lwp_dead (lwp, wstat);
2656 }
2657 }
2658
2659 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2660 current_inferior = saved_inferior;
2661 else
2662 {
2663 if (debug_threads)
2664 fprintf (stderr, "Previously current thread died.\n");
2665
2666 if (non_stop)
2667 {
2668 /* We can't change the current inferior behind GDB's back,
2669 otherwise, a subsequent command may apply to the wrong
2670 process. */
2671 current_inferior = NULL;
2672 }
2673 else
2674 {
2675 /* Set a valid thread as current. */
2676 set_desired_inferior (0);
2677 }
2678 }
2679 }
2680
2681 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2682 move it out, because we need to report the stop event to GDB. For
2683 example, if the user puts a breakpoint in the jump pad, it's
2684 because she wants to debug it. */
2685
2686 static int
2687 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2688 {
2689 struct lwp_info *lwp = (struct lwp_info *) entry;
2690 struct thread_info *thread = get_lwp_thread (lwp);
2691
2692 gdb_assert (lwp->suspended == 0);
2693 gdb_assert (lwp->stopped);
2694
2695 /* Allow debugging the jump pad, gdb_collect, etc.. */
2696 return (supports_fast_tracepoints ()
2697 && in_process_agent_loaded ()
2698 && (gdb_breakpoint_here (lwp->stop_pc)
2699 || lwp->stopped_by_watchpoint
2700 || thread->last_resume_kind == resume_step)
2701 && linux_fast_tracepoint_collecting (lwp, NULL));
2702 }
2703
2704 static void
2705 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2706 {
2707 struct lwp_info *lwp = (struct lwp_info *) entry;
2708 struct thread_info *thread = get_lwp_thread (lwp);
2709 int *wstat;
2710
2711 gdb_assert (lwp->suspended == 0);
2712 gdb_assert (lwp->stopped);
2713
2714 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2715
2716 /* Allow debugging the jump pad, gdb_collect, etc. */
2717 if (!gdb_breakpoint_here (lwp->stop_pc)
2718 && !lwp->stopped_by_watchpoint
2719 && thread->last_resume_kind != resume_step
2720 && maybe_move_out_of_jump_pad (lwp, wstat))
2721 {
2722 if (debug_threads)
2723 fprintf (stderr,
2724 "LWP %ld needs stabilizing (in jump pad)\n",
2725 lwpid_of (lwp));
2726
2727 if (wstat)
2728 {
2729 lwp->status_pending_p = 0;
2730 enqueue_one_deferred_signal (lwp, wstat);
2731
2732 if (debug_threads)
2733 fprintf (stderr,
2734 "Signal %d for LWP %ld deferred "
2735 "(in jump pad)\n",
2736 WSTOPSIG (*wstat), lwpid_of (lwp));
2737 }
2738
2739 linux_resume_one_lwp (lwp, 0, 0, NULL);
2740 }
2741 else
2742 lwp->suspended++;
2743 }
2744
2745 static int
2746 lwp_running (struct inferior_list_entry *entry, void *data)
2747 {
2748 struct lwp_info *lwp = (struct lwp_info *) entry;
2749
2750 if (lwp->dead)
2751 return 0;
2752 if (lwp->stopped)
2753 return 0;
2754 return 1;
2755 }
2756
2757 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2758 If SUSPEND, then also increase the suspend count of every LWP,
2759 except EXCEPT. */
2760
2761 static void
2762 stop_all_lwps (int suspend, struct lwp_info *except)
2763 {
2764 stopping_threads = 1;
2765
2766 if (suspend)
2767 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2768 else
2769 find_inferior (&all_lwps, send_sigstop_callback, except);
2770 for_each_inferior (&all_lwps, wait_for_sigstop);
2771 stopping_threads = 0;
2772 }
2773
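/* Illustration only: in this file the suspending form is paired with
   an unsuspending undo, as in the exit-jump-pad handling above:

       stop_all_lwps (1, event_child);
       ...delete the breakpoint...
       unstop_all_lwps (1, event_child);

   while stop_all_lwps (0, NULL) is used before reporting a stop in
   all-stop mode, with threads set running again later via
   proceed_all_lwps or unstop_all_lwps.  */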
2774 /* Resume execution of LWP.
2775 If STEP is nonzero, single-step it.
2776 If SIGNAL is nonzero, give it that signal. */
2777
2778 static void
2779 linux_resume_one_lwp (struct lwp_info *lwp,
2780 int step, int signal, siginfo_t *info)
2781 {
2782 struct thread_info *saved_inferior;
2783 int fast_tp_collecting;
2784
2785 if (lwp->stopped == 0)
2786 return;
2787
2788 fast_tp_collecting = lwp->collecting_fast_tracepoint;
2789
2790 gdb_assert (!stabilizing_threads || fast_tp_collecting);
2791
2792 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2793 user used the "jump" command, or "set $pc = foo"). */
2794 if (lwp->stop_pc != get_pc (lwp))
2795 {
2796 /* Collecting 'while-stepping' actions doesn't make sense
2797 anymore. */
2798 release_while_stepping_state_list (get_lwp_thread (lwp));
2799 }
2800
2801 /* If we have pending signals or status, and a new signal, enqueue the
2802 signal. Also enqueue the signal if we are waiting to reinsert a
2803 breakpoint; it will be picked up again below. */
2804 if (signal != 0
2805 && (lwp->status_pending_p
2806 || lwp->pending_signals != NULL
2807 || lwp->bp_reinsert != 0
2808 || fast_tp_collecting))
2809 {
2810 struct pending_signals *p_sig;
2811 p_sig = xmalloc (sizeof (*p_sig));
2812 p_sig->prev = lwp->pending_signals;
2813 p_sig->signal = signal;
2814 if (info == NULL)
2815 memset (&p_sig->info, 0, sizeof (siginfo_t));
2816 else
2817 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2818 lwp->pending_signals = p_sig;
2819 }
2820
2821 if (lwp->status_pending_p)
2822 {
2823 if (debug_threads)
2824 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2825 " has pending status\n",
2826 lwpid_of (lwp), step ? "step" : "continue", signal,
2827 lwp->stop_expected ? "expected" : "not expected");
2828 return;
2829 }
2830
2831 saved_inferior = current_inferior;
2832 current_inferior = get_lwp_thread (lwp);
2833
2834 if (debug_threads)
2835 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2836 lwpid_of (lwp), step ? "step" : "continue", signal,
2837 lwp->stop_expected ? "expected" : "not expected");
2838
2839 /* This bit needs some thinking about. If we get a signal that
2840 we must report while a single-step reinsert is still pending,
2841 we often end up resuming the thread. It might be better to
2842 (ew) allow a stack of pending events; then we could be sure that
2843 the reinsert happened right away, and we would not lose any signals.
2844
2845 Making this stack would also shrink the window in which breakpoints are
2846 uninserted (see comment in linux_wait_for_lwp) but not enough for
2847 complete correctness, so it won't solve that problem. It may be
2848 worthwhile just to solve this one, however. */
2849 if (lwp->bp_reinsert != 0)
2850 {
2851 if (debug_threads)
2852 fprintf (stderr, " pending reinsert at 0x%s\n",
2853 paddress (lwp->bp_reinsert));
2854
2855 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
2856 {
2857 if (fast_tp_collecting == 0)
2858 {
2859 if (step == 0)
2860 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2861 if (lwp->suspended)
2862 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
2863 lwp->suspended);
2864 }
2865
2866 step = 1;
2867 }
2868
2869 /* Postpone any pending signal. It was enqueued above. */
2870 signal = 0;
2871 }
2872
2873 if (fast_tp_collecting == 1)
2874 {
2875 if (debug_threads)
2876 fprintf (stderr, "\
2877 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
2878 lwpid_of (lwp));
2879
2880 /* Postpone any pending signal. It was enqueued above. */
2881 signal = 0;
2882 }
2883 else if (fast_tp_collecting == 2)
2884 {
2885 if (debug_threads)
2886 fprintf (stderr, "\
2887 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
2888 lwpid_of (lwp));
2889
2890 if (can_hardware_single_step ())
2891 step = 1;
2892 else
2893 fatal ("moving out of jump pad single-stepping"
2894 " not implemented on this target");
2895
2896 /* Postpone any pending signal. It was enqueued above. */
2897 signal = 0;
2898 }
2899
2900 /* If we have while-stepping actions in this thread, set it stepping.
2901 If we have a signal to deliver, it may or may not be set to
2902 SIG_IGN; we don't know. Assume so, and allow collecting
2903 while-stepping into a signal handler. A possible smart thing to
2904 do would be to set an internal breakpoint at the signal return
2905 address, continue, and carry on catching this while-stepping
2906 action only when that breakpoint is hit. A future
2907 enhancement. */
2908 if (get_lwp_thread (lwp)->while_stepping != NULL
2909 && can_hardware_single_step ())
2910 {
2911 if (debug_threads)
2912 fprintf (stderr,
2913 "lwp %ld has a while-stepping action -> forcing step.\n",
2914 lwpid_of (lwp));
2915 step = 1;
2916 }
2917
2918 if (debug_threads && the_low_target.get_pc != NULL)
2919 {
2920 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2921 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
2922 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
2923 }
2924
2925 /* If we have pending signals, consume one unless we are trying to
2926 reinsert a breakpoint or we're trying to finish a fast tracepoint
2927 collect. */
2928 if (lwp->pending_signals != NULL
2929 && lwp->bp_reinsert == 0
2930 && fast_tp_collecting == 0)
2931 {
2932 struct pending_signals **p_sig;
2933
2934 p_sig = &lwp->pending_signals;
2935 while ((*p_sig)->prev != NULL)
2936 p_sig = &(*p_sig)->prev;
2937
2938 signal = (*p_sig)->signal;
2939 if ((*p_sig)->info.si_signo != 0)
2940 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
2941
2942 free (*p_sig);
2943 *p_sig = NULL;
2944 }
2945
2946 if (the_low_target.prepare_to_resume != NULL)
2947 the_low_target.prepare_to_resume (lwp);
2948
2949 regcache_invalidate_one ((struct inferior_list_entry *)
2950 get_lwp_thread (lwp));
2951 errno = 0;
2952 lwp->stopped = 0;
2953 lwp->stopped_by_watchpoint = 0;
2954 lwp->stepping = step;
2955 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
2956 /* Coerce to a uintptr_t first to avoid potential gcc warning
2957 of coercing an 8 byte integer to a 4 byte pointer. */
2958 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
2959
2960 current_inferior = saved_inferior;
2961 if (errno)
2962 {
2963 /* ESRCH from ptrace either means that the thread was already
2964 running (an error) or that it is gone (a race condition). If
2965 it's gone, we will get a notification the next time we wait,
2966 so we can ignore the error. We could differentiate these
2967 two, but it's tricky without waiting; the thread still exists
2968 as a zombie, so sending it signal 0 would succeed. So just
2969 ignore ESRCH. */
2970 if (errno == ESRCH)
2971 return;
2972
2973 perror_with_name ("ptrace");
2974 }
2975 }
2976
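/* Illustration only: pending signals are pushed at the list head but
   consumed from the tail (oldest first), as in the dequeue loop
   above.  A minimal sketch of that idiom with a hypothetical node
   type (not part of the build):  */
#if 0
struct example_node { struct example_node *prev; int val; };

static int
example_dequeue_oldest (struct example_node **head)
{
  struct example_node **p = head;
  int val;

  while ((*p)->prev != NULL)	/* walk to the oldest entry */
    p = &(*p)->prev;

  val = (*p)->val;
  free (*p);
  *p = NULL;			/* unlink the tail */
  return val;
}
#endif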
2977 struct thread_resume_array
2978 {
2979 struct thread_resume *resume;
2980 size_t n;
2981 };
2982
2983 /* This function is called once per thread. We look up the thread
2984 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2985 resume request.
2986
2987 This algorithm is O(threads * resume elements), but resume elements
2988 is small (and will remain small at least until GDB supports thread
2989 suspension). */
2990 static int
2991 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
2992 {
2993 struct lwp_info *lwp;
2994 struct thread_info *thread;
2995 int ndx;
2996 struct thread_resume_array *r;
2997
2998 thread = (struct thread_info *) entry;
2999 lwp = get_thread_lwp (thread);
3000 r = arg;
3001
3002 for (ndx = 0; ndx < r->n; ndx++)
3003 {
3004 ptid_t ptid = r->resume[ndx].thread;
3005 if (ptid_equal (ptid, minus_one_ptid)
3006 || ptid_equal (ptid, entry->id)
3007 || (ptid_is_pid (ptid)
3008 && (ptid_get_pid (ptid) == pid_of (lwp)))
3009 || (ptid_get_lwp (ptid) == -1
3010 && (ptid_get_pid (ptid) == pid_of (lwp))))
3011 {
3012 if (r->resume[ndx].kind == resume_stop
3013 && thread->last_resume_kind == resume_stop)
3014 {
3015 if (debug_threads)
3016 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3017 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3018 ? "stopped"
3019 : "stopping",
3020 lwpid_of (lwp));
3021
3022 continue;
3023 }
3024
3025 lwp->resume = &r->resume[ndx];
3026 thread->last_resume_kind = lwp->resume->kind;
3027
3028 /* If we had a deferred signal to report, dequeue one now.
3029 This can happen if LWP gets more than one signal while
3030 trying to get out of a jump pad. */
3031 if (lwp->stopped
3032 && !lwp->status_pending_p
3033 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3034 {
3035 lwp->status_pending_p = 1;
3036
3037 if (debug_threads)
3038 fprintf (stderr,
3039 "Dequeueing deferred signal %d for LWP %ld, "
3040 "leaving status pending.\n",
3041 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3042 }
3043
3044 return 0;
3045 }
3046 }
3047
3048 /* No resume action for this thread. */
3049 lwp->resume = NULL;
3050
3051 return 0;
3052 }
3053
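/* Illustration only: a caller resuming everything with no signal
   passes a single wildcard element, just as the thread-death
   fallback in linux_wait_1 above does (fragment, not compiled):  */
#if 0
  struct thread_resume resume_info;

  resume_info.thread = minus_one_ptid;	/* matches every thread */
  resume_info.kind = resume_continue;
  resume_info.sig = 0;
  linux_resume (&resume_info, 1);
#endif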
3054
3055 /* Set *FLAG_P if this lwp has an interesting status pending. */
3056 static int
3057 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3058 {
3059 struct lwp_info *lwp = (struct lwp_info *) entry;
3060
3061 /* LWPs which will not be resumed are not interesting, because
3062 we might not wait for them next time through linux_wait. */
3063 if (lwp->resume == NULL)
3064 return 0;
3065
3066 if (lwp->status_pending_p)
3067 * (int *) flag_p = 1;
3068
3069 return 0;
3070 }
3071
3072 /* Return 1 if this lwp that GDB wants running is stopped at an
3073 internal breakpoint that we need to step over. It assumes that any
3074 required STOP_PC adjustment has already been propagated to the
3075 inferior's regcache. */
3076
3077 static int
3078 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3079 {
3080 struct lwp_info *lwp = (struct lwp_info *) entry;
3081 struct thread_info *thread;
3082 struct thread_info *saved_inferior;
3083 CORE_ADDR pc;
3084
3085 /* LWPs which will not be resumed are not interesting, because we
3086 might not wait for them next time through linux_wait. */
3087
3088 if (!lwp->stopped)
3089 {
3090 if (debug_threads)
3091 fprintf (stderr,
3092 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3093 lwpid_of (lwp));
3094 return 0;
3095 }
3096
3097 thread = get_lwp_thread (lwp);
3098
3099 if (thread->last_resume_kind == resume_stop)
3100 {
3101 if (debug_threads)
3102 fprintf (stderr,
3103 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3104 lwpid_of (lwp));
3105 return 0;
3106 }
3107
3108 gdb_assert (lwp->suspended >= 0);
3109
3110 if (lwp->suspended)
3111 {
3112 if (debug_threads)
3113 fprintf (stderr,
3114 "Need step over [LWP %ld]? Ignoring, suspended\n",
3115 lwpid_of (lwp));
3116 return 0;
3117 }
3118
3119 if (!lwp->need_step_over)
3120 {
3121 if (debug_threads)
3122 fprintf (stderr,
3123 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3124 }
3125
3126 if (lwp->status_pending_p)
3127 {
3128 if (debug_threads)
3129 fprintf (stderr,
3130 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3131 lwpid_of (lwp));
3132 return 0;
3133 }
3134
3135 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3136 or we have. */
3137 pc = get_pc (lwp);
3138
3139 /* If the PC has changed since we stopped, then don't do anything,
3140 and let the breakpoint/tracepoint be hit. This happens if, for
3141 instance, GDB handled the decr_pc_after_break subtraction itself,
3142 GDB is OOL stepping this thread, or the user has issued a "jump"
3143 command, or poked the thread's registers herself.
3144 if (pc != lwp->stop_pc)
3145 {
3146 if (debug_threads)
3147 fprintf (stderr,
3148 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3149 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3150 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3151
3152 lwp->need_step_over = 0;
3153 return 0;
3154 }
3155
3156 saved_inferior = current_inferior;
3157 current_inferior = thread;
3158
3159 /* We can only step over breakpoints we know about. */
3160 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3161 {
3162 /* Don't step over a breakpoint that GDB expects to hit
3163 though. */
3164 if (gdb_breakpoint_here (pc))
3165 {
3166 if (debug_threads)
3167 fprintf (stderr,
3168 "Need step over [LWP %ld]? yes, but found"
3169 " GDB breakpoint at 0x%s; skipping step over\n",
3170 lwpid_of (lwp), paddress (pc));
3171
3172 current_inferior = saved_inferior;
3173 return 0;
3174 }
3175 else
3176 {
3177 if (debug_threads)
3178 fprintf (stderr,
3179 "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
3180 lwpid_of (lwp), paddress (pc));
3181
3182 /* We've found an lwp that needs stepping over --- return 1 so
3183 that find_inferior stops looking. */
3184 current_inferior = saved_inferior;
3185
3186 /* If the step over is cancelled, this is set again. */
3187 lwp->need_step_over = 0;
3188 return 1;
3189 }
3190 }
3191
3192 current_inferior = saved_inferior;
3193
3194 if (debug_threads)
3195 fprintf (stderr,
3196 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3197 lwpid_of (lwp), paddress (pc));
3198
3199 return 0;
3200 }
3201
3202 /* Start a step-over operation on LWP. When LWP stops at a
3203 breakpoint, to make progress, we need to move the breakpoint out
3204 of the way. If we let other threads run while we do that, they may
3205 pass by the breakpoint location and miss hitting it. To avoid
3206 that, a step-over momentarily stops all threads while LWP is
3207 single-stepped with the breakpoint temporarily uninserted from
3208 the inferior. When the single-step finishes, we reinsert the
3209 breakpoint, and let all threads that are supposed to be running,
3210 run again.
3211
3212 On targets that don't support hardware single-step, we don't
3213 currently support full software single-stepping. Instead, we only
3214 support stepping over the thread event breakpoint, by asking the
3215 low target where to place a reinsert breakpoint. Since this
3216 routine assumes the breakpoint being stepped over is a thread event
3217 breakpoint, it usually assumes the return address of the current
3218 function is a good enough place to set the reinsert breakpoint. */
3219
3220 static int
3221 start_step_over (struct lwp_info *lwp)
3222 {
3223 struct thread_info *saved_inferior;
3224 CORE_ADDR pc;
3225 int step;
3226
3227 if (debug_threads)
3228 fprintf (stderr,
3229 "Starting step-over on LWP %ld. Stopping all threads\n",
3230 lwpid_of (lwp));
3231
3232 stop_all_lwps (1, lwp);
3233 gdb_assert (lwp->suspended == 0);
3234
3235 if (debug_threads)
3236 fprintf (stderr, "Done stopping all threads for step-over.\n");
3237
3238 /* Note, we should always reach here with an already adjusted PC,
3239 either by GDB (if we're resuming due to GDB's request), or by our
3240 caller, if we just finished handling an internal breakpoint GDB
3241 shouldn't care about. */
3242 pc = get_pc (lwp);
3243
3244 saved_inferior = current_inferior;
3245 current_inferior = get_lwp_thread (lwp);
3246
3247 lwp->bp_reinsert = pc;
3248 uninsert_breakpoints_at (pc);
3249 uninsert_fast_tracepoint_jumps_at (pc);
3250
3251 if (can_hardware_single_step ())
3252 {
3253 step = 1;
3254 }
3255 else
3256 {
3257 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3258 set_reinsert_breakpoint (raddr);
3259 step = 0;
3260 }
3261
3262 current_inferior = saved_inferior;
3263
3264 linux_resume_one_lwp (lwp, step, 0, NULL);
3265
3266 /* Require next event from this LWP. */
3267 step_over_bkpt = lwp->head.id;
3268 return 1;
3269 }
3270
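/* Illustration only: end to end, the step-over dance driven from
   linux_wait_1 and proceed_all_lwps is roughly:

       stop_all_lwps (1, lwp);              (suspend everyone else)
       uninsert_breakpoints_at (pc);        (breakpoint out of the way)
       linux_resume_one_lwp (lwp, 1, 0, NULL);   (single-step)
       ...wait for the SIGTRAP from LWP...
       finish_step_over (lwp);              (reinsert, see below)
       unsuspend_all_lwps (lwp);
       proceed_all_lwps ();  */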
3271 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3272 start_step_over, if still there, and delete any reinsert
3273 breakpoints we've set, on non hardware single-step targets. */
3274
3275 static int
3276 finish_step_over (struct lwp_info *lwp)
3277 {
3278 if (lwp->bp_reinsert != 0)
3279 {
3280 if (debug_threads)
3281 fprintf (stderr, "Finished step over.\n");
3282
3283 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3284 may be no breakpoint to reinsert there by now. */
3285 reinsert_breakpoints_at (lwp->bp_reinsert);
3286 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3287
3288 lwp->bp_reinsert = 0;
3289
3290 /* Delete any software-single-step reinsert breakpoints. No
3291 longer needed. We don't have to worry about other threads
3292 hitting this trap, and later not being able to explain it,
3293 because we were stepping over a breakpoint, and we hold all
3294 threads but LWP stopped while doing that. */
3295 if (!can_hardware_single_step ())
3296 delete_reinsert_breakpoints ();
3297
3298 step_over_bkpt = null_ptid;
3299 return 1;
3300 }
3301 else
3302 return 0;
3303 }
3304
3305 /* This function is called once per thread. We check the thread's resume
3306 request, which will tell us whether to resume, step, or leave the thread
3307 stopped; and what signal, if any, it should be sent.
3308
3309 For threads which we aren't explicitly told otherwise, we preserve
3310 the stepping flag; this is used for stepping over gdbserver-placed
3311 breakpoints.
3312
3313 If pending_flags was set in any thread, we queue any needed
3314 signals, since we won't actually resume. We already have a pending
3315 event to report, so we don't need to preserve any step requests;
3316 they should be re-issued if necessary. */
3317
3318 static int
3319 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3320 {
3321 struct lwp_info *lwp;
3322 struct thread_info *thread;
3323 int step;
3324 int leave_all_stopped = * (int *) arg;
3325 int leave_pending;
3326
3327 thread = (struct thread_info *) entry;
3328 lwp = get_thread_lwp (thread);
3329
3330 if (lwp->resume == NULL)
3331 return 0;
3332
3333 if (lwp->resume->kind == resume_stop)
3334 {
3335 if (debug_threads)
3336 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3337
3338 if (!lwp->stopped)
3339 {
3340 if (debug_threads)
3341 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3342
3343 /* Stop the thread, and wait for the event asynchronously,
3344 through the event loop. */
3345 send_sigstop (lwp);
3346 }
3347 else
3348 {
3349 if (debug_threads)
3350 fprintf (stderr, "already stopped LWP %ld\n",
3351 lwpid_of (lwp));
3352
3353 /* The LWP may have been stopped in an internal event that
3354 was not meant to be notified back to GDB (e.g., gdbserver
3355 breakpoint), so we should be reporting a stop event in
3356 this case too. */
3357
3358 /* If the thread already has a pending SIGSTOP, this is a
3359 no-op. Otherwise, something later will presumably resume
3360 the thread and this will cause it to cancel any pending
3361 operation, due to last_resume_kind == resume_stop. If
3362 the thread already has a pending status to report, we
3363 will still report it the next time we wait - see
3364 status_pending_p_callback. */
3365
3366 /* If we already have a pending signal to report, then
3367 there's no need to queue a SIGSTOP, as this means we're
3368 midway through moving the LWP out of the jumppad, and we
3369 will report the pending signal as soon as that is
3370 finished. */
3371 if (lwp->pending_signals_to_report == NULL)
3372 send_sigstop (lwp);
3373 }
3374
3375 /* For stop requests, we're done. */
3376 lwp->resume = NULL;
3377 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3378 return 0;
3379 }
3380
3381 /* If this thread which is about to be resumed has a pending status,
3382 then don't resume any threads - we can just report the pending
3383 status. Make sure to queue any signals that would otherwise be
3384 sent. In all-stop mode, we make this decision based on whether *any*
3385 thread has a pending status. If there's a thread that needs the
3386 step-over-breakpoint dance, then don't resume any other thread
3387 but that particular one. */
3388 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3389
3390 if (!leave_pending)
3391 {
3392 if (debug_threads)
3393 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3394
3395 step = (lwp->resume->kind == resume_step);
3396 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3397 }
3398 else
3399 {
3400 if (debug_threads)
3401 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3402
3403 /* If we have a new signal, enqueue the signal. */
3404 if (lwp->resume->sig != 0)
3405 {
3406 struct pending_signals *p_sig;
3407 p_sig = xmalloc (sizeof (*p_sig));
3408 p_sig->prev = lwp->pending_signals;
3409 p_sig->signal = lwp->resume->sig;
3410 memset (&p_sig->info, 0, sizeof (siginfo_t));
3411
3412 /* If this is the same signal we were previously stopped by,
3413 make sure to queue its siginfo. We can ignore the return
3414 value of ptrace; if it fails, we'll skip
3415 PTRACE_SETSIGINFO. */
3416 if (WIFSTOPPED (lwp->last_status)
3417 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3418 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3419
3420 lwp->pending_signals = p_sig;
3421 }
3422 }
3423
3424 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3425 lwp->resume = NULL;
3426 return 0;
3427 }
3428
3429 static void
3430 linux_resume (struct thread_resume *resume_info, size_t n)
3431 {
3432 struct thread_resume_array array = { resume_info, n };
3433 struct lwp_info *need_step_over = NULL;
3434 int any_pending;
3435 int leave_all_stopped;
3436
3437 find_inferior (&all_threads, linux_set_resume_request, &array);
3438
3439 /* If there is a thread which would otherwise be resumed, which has
3440 a pending status, then don't resume any threads - we can just
3441 report the pending status. Make sure to queue any signals that
3442 would otherwise be sent. In non-stop mode, we'll apply this
3443 logic to each thread individually. We consume all pending events
3444 before considering to start a step-over (in all-stop). */
3445 any_pending = 0;
3446 if (!non_stop)
3447 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3448
3449 /* If there is a thread which would otherwise be resumed, which is
3450 stopped at a breakpoint that needs stepping over, then don't
3451 resume any threads - have it step over the breakpoint with all
3452 other threads stopped, then resume all threads again. Make sure
3453 to queue any signals that would otherwise be delivered or
3454 queued. */
3455 if (!any_pending && supports_breakpoints ())
3456 need_step_over
3457 = (struct lwp_info *) find_inferior (&all_lwps,
3458 need_step_over_p, NULL);
3459
3460 leave_all_stopped = (need_step_over != NULL || any_pending);
3461
3462 if (debug_threads)
3463 {
3464 if (need_step_over != NULL)
3465 fprintf (stderr, "Not resuming all, need step over\n");
3466 else if (any_pending)
3467 fprintf (stderr,
3468 "Not resuming, all-stop and found "
3469 "an LWP with pending status\n");
3470 else
3471 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3472 }
3473
3474 /* Even if we're leaving threads stopped, queue all signals we'd
3475 otherwise deliver. */
3476 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3477
3478 if (need_step_over)
3479 start_step_over (need_step_over);
3480 }
3481
3482 /* This function is called once per thread. We check the thread's
3483 last resume request, which will tell us whether to resume, step, or
3484 leave the thread stopped. Any signal the client requested to be
3485 delivered has already been enqueued at this point.
3486
3487 If any thread that GDB wants running is stopped at an internal
3488 breakpoint that needs stepping over, we start a step-over operation
3489 on that particular thread, and leave all others stopped. */
3490
3491 static int
3492 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3493 {
3494 struct lwp_info *lwp = (struct lwp_info *) entry;
3495 struct thread_info *thread;
3496 int step;
3497
3498 if (lwp == except)
3499 return 0;
3500
3501 if (debug_threads)
3502 fprintf (stderr,
3503 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3504
3505 if (!lwp->stopped)
3506 {
3507 if (debug_threads)
3508 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3509 return 0;
3510 }
3511
3512 thread = get_lwp_thread (lwp);
3513
3514 if (thread->last_resume_kind == resume_stop
3515 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3516 {
3517 if (debug_threads)
3518 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
3519 lwpid_of (lwp));
3520 return 0;
3521 }
3522
3523 if (lwp->status_pending_p)
3524 {
3525 if (debug_threads)
3526 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3527 lwpid_of (lwp));
3528 return 0;
3529 }
3530
3531 gdb_assert (lwp->suspended >= 0);
3532
3533 if (lwp->suspended)
3534 {
3535 if (debug_threads)
3536 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3537 return 0;
3538 }
3539
3540 if (thread->last_resume_kind == resume_stop
3541 && lwp->pending_signals_to_report == NULL
3542 && lwp->collecting_fast_tracepoint == 0)
3543 {
3544 /* We haven't reported this LWP as stopped yet (otherwise, the
3545 last_status.kind check above would catch it, and we wouldn't
3546 reach here). This LWP may have been momentarily paused by a
3547 stop_all_lwps call while handling, for example, another LWP's
3548 step-over. In that case, the pending expected SIGSTOP signal
3549 that was queued at vCont;t handling time will have already
3550 been consumed by wait_for_sigstop, and so we need to requeue
3551 another one here. Note that if the LWP already has a SIGSTOP
3552 pending, this is a no-op. */
3553
3554 if (debug_threads)
3555 fprintf (stderr,
3556 "Client wants LWP %ld to stop. "
3557 "Making sure it has a SIGSTOP pending\n",
3558 lwpid_of (lwp));
3559
3560 send_sigstop (lwp);
3561 }
3562
3563 step = thread->last_resume_kind == resume_step;
3564 linux_resume_one_lwp (lwp, step, 0, NULL);
3565 return 0;
3566 }
3567
3568 static int
3569 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3570 {
3571 struct lwp_info *lwp = (struct lwp_info *) entry;
3572
3573 if (lwp == except)
3574 return 0;
3575
3576 lwp->suspended--;
3577 gdb_assert (lwp->suspended >= 0);
3578
3579 return proceed_one_lwp (entry, except);
3580 }
3581
3582 /* When we finish a step-over, set threads running again. If there's
3583 another thread that may need a step-over, now's the time to start
3584 it. Eventually, we'll move all threads past their breakpoints. */
3585
3586 static void
3587 proceed_all_lwps (void)
3588 {
3589 struct lwp_info *need_step_over;
3590
3591 /* If there is a thread which would otherwise be resumed, which is
3592 stopped at a breakpoint that needs stepping over, then don't
3593 resume any threads - have it step over the breakpoint with all
3594 other threads stopped, then resume all threads again. */
3595
3596 if (supports_breakpoints ())
3597 {
3598 need_step_over
3599 = (struct lwp_info *) find_inferior (&all_lwps,
3600 need_step_over_p, NULL);
3601
3602 if (need_step_over != NULL)
3603 {
3604 if (debug_threads)
3605 fprintf (stderr, "proceed_all_lwps: found "
3606 "thread %ld needing a step-over\n",
3607 lwpid_of (need_step_over));
3608
3609 start_step_over (need_step_over);
3610 return;
3611 }
3612 }
3613
3614 if (debug_threads)
3615 fprintf (stderr, "Proceeding, no step-over needed\n");
3616
3617 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3618 }
3619
3620 /* Stopped LWPs that the client wanted to be running, that don't have
3621 pending statuses, are set to run again, except for EXCEPT, if not
3622 NULL. This undoes a stop_all_lwps call. */
3623
3624 static void
3625 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3626 {
3627 if (debug_threads)
3628 {
3629 if (except)
3630 fprintf (stderr,
3631 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3632 else
3633 fprintf (stderr,
3634 "unstopping all lwps\n");
3635 }
3636
3637 if (unsuspend)
3638 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3639 else
3640 find_inferior (&all_lwps, proceed_one_lwp, except);
3641 }
3642
3643 #ifdef HAVE_LINUX_USRREGS
3644
3645 int
3646 register_addr (int regnum)
3647 {
3648 int addr;
3649
3650 if (regnum < 0 || regnum >= the_low_target.num_regs)
3651 error ("Invalid register number %d.", regnum);
3652
3653 addr = the_low_target.regmap[regnum];
3654
3655 return addr;
3656 }
3657
3658 /* Fetch one register. */
3659 static void
3660 fetch_register (struct regcache *regcache, int regno)
3661 {
3662 CORE_ADDR regaddr;
3663 int i, size;
3664 char *buf;
3665 int pid;
3666
3667 if (regno >= the_low_target.num_regs)
3668 return;
3669 if ((*the_low_target.cannot_fetch_register) (regno))
3670 return;
3671
3672 regaddr = register_addr (regno);
3673 if (regaddr == -1)
3674 return;
3675
3676 pid = lwpid_of (get_thread_lwp (current_inferior));
3677 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3678 & - sizeof (PTRACE_XFER_TYPE));
3679 buf = alloca (size);
3680 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3681 {
3682 errno = 0;
3683 *(PTRACE_XFER_TYPE *) (buf + i) =
3684 ptrace (PTRACE_PEEKUSER, pid,
3685 /* Coerce to a uintptr_t first to avoid potential gcc warning
3686 of coercing an 8 byte integer to a 4 byte pointer. */
3687 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
3688 regaddr += sizeof (PTRACE_XFER_TYPE);
3689 if (errno != 0)
3690 error ("reading register %d: %s", regno, strerror (errno));
3691 }
3692
3693 if (the_low_target.supply_ptrace_register)
3694 the_low_target.supply_ptrace_register (regcache, regno, buf);
3695 else
3696 supply_register (regcache, regno, buf);
3697 }
3698
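/* Illustration only: the size computation above rounds a register's
   size up to a whole number of PTRACE_XFER_TYPE words.  E.g.,
   assuming 4-byte words and a hypothetical 10-byte register:

       (10 + 4 - 1) & -4  ==  13 & ~3  ==  12

   so the loop issues three PTRACE_PEEKUSER transfers.  */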
3699 /* Fetch all registers, or just one, from the child process. */
3700 static void
3701 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
3702 {
3703 if (regno == -1)
3704 for (regno = 0; regno < the_low_target.num_regs; regno++)
3705 fetch_register (regcache, regno);
3706 else
3707 fetch_register (regcache, regno);
3708 }
3709
3710 /* Store our register values back into the inferior.
3711 If REGNO is -1, do this for all registers.
3712 Otherwise, REGNO specifies which register (so we can save time). */
3713 static void
3714 usr_store_inferior_registers (struct regcache *regcache, int regno)
3715 {
3716 CORE_ADDR regaddr;
3717 int i, size;
3718 char *buf;
3719 int pid;
3720
3721 if (regno >= 0)
3722 {
3723 if (regno >= the_low_target.num_regs)
3724 return;
3725
3726 if ((*the_low_target.cannot_store_register) (regno) == 1)
3727 return;
3728
3729 regaddr = register_addr (regno);
3730 if (regaddr == -1)
3731 return;
3732 errno = 0;
3733 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3734 & - sizeof (PTRACE_XFER_TYPE);
3735 buf = alloca (size);
3736 memset (buf, 0, size);
3737
3738 if (the_low_target.collect_ptrace_register)
3739 the_low_target.collect_ptrace_register (regcache, regno, buf);
3740 else
3741 collect_register (regcache, regno, buf);
3742
3743 pid = lwpid_of (get_thread_lwp (current_inferior));
3744 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3745 {
3746 errno = 0;
3747 ptrace (PTRACE_POKEUSER, pid,
3748 /* Coerce to a uintptr_t first to avoid potential gcc warning
3749 about coercing an 8 byte integer to a 4 byte pointer. */
3750 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3751 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
3752 if (errno != 0)
3753 {
3754 /* At this point, ESRCH should mean the process is
3755 already gone, in which case we simply ignore attempts
3756 to change its registers. See also the related
3757 comment in linux_resume_one_lwp. */
3758 if (errno == ESRCH)
3759 return;
3760
3761 if ((*the_low_target.cannot_store_register) (regno) == 0)
3762 error ("writing register %d: %s", regno, strerror (errno));
3763 }
3764 regaddr += sizeof (PTRACE_XFER_TYPE);
3765 }
3766 }
3767 else
3768 for (regno = 0; regno < the_low_target.num_regs; regno++)
3769 usr_store_inferior_registers (regcache, regno);
3770 }
3771 #endif /* HAVE_LINUX_USRREGS */
3772
3773
3774
3775 #ifdef HAVE_LINUX_REGSETS
3776
3777 static int
3778 regsets_fetch_inferior_registers (struct regcache *regcache)
3779 {
3780 struct regset_info *regset;
3781 int saw_general_regs = 0;
3782 int pid;
3783 struct iovec iov;
3784
3785 regset = target_regsets;
3786
3787 pid = lwpid_of (get_thread_lwp (current_inferior));
3788 while (regset->size >= 0)
3789 {
3790 void *buf, *data;
3791 int nt_type, res;
3792
3793 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3794 {
3795 regset ++;
3796 continue;
3797 }
3798
3799 buf = xmalloc (regset->size);
3800
3801 nt_type = regset->nt_type;
3802 if (nt_type)
3803 {
3804 iov.iov_base = buf;
3805 iov.iov_len = regset->size;
3806 data = (void *) &iov;
3807 }
3808 else
3809 data = buf;
3810
3811 #ifndef __sparc__
3812 res = ptrace (regset->get_request, pid, nt_type, data);
3813 #else
3814 res = ptrace (regset->get_request, pid, data, nt_type);
3815 #endif
3816 if (res < 0)
3817 {
3818 if (errno == EIO)
3819 {
3820 /* If we get EIO on a regset, do not try it again for
3821 this process. */
3822 disabled_regsets[regset - target_regsets] = 1;
3823 free (buf);
3824 continue;
3825 }
3826 else
3827 {
3828 char s[256];
3829 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3830 pid);
3831 perror (s);
3832 }
3833 }
3834 else if (regset->type == GENERAL_REGS)
3835 saw_general_regs = 1;
3836 regset->store_function (regcache, buf);
3837 regset ++;
3838 free (buf);
3839 }
3840 if (saw_general_regs)
3841 return 0;
3842 else
3843 return 1;
3844 }
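/* The nt_type branch above matches the kernel's PTRACE_GETREGSET
   interface, which takes a struct iovec instead of a raw buffer and
   shrinks iov_len to what it actually filled in.  A minimal sketch,
   assuming a kernel new enough to support the request; the buffer
   size is illustrative.  */
#if 0
#include <sys/uio.h>
#include <elf.h>                 /* NT_PRSTATUS */
#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

char regbuf[1024];               /* size is illustrative */
struct iovec iov = { regbuf, sizeof regbuf };
if (ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov) == 0)
  /* iov.iov_len now holds the number of bytes the kernel wrote.  */ ;
#endif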
3845
3846 static int
3847 regsets_store_inferior_registers (struct regcache *regcache)
3848 {
3849 struct regset_info *regset;
3850 int saw_general_regs = 0;
3851 int pid;
3852 struct iovec iov;
3853
3854 regset = target_regsets;
3855
3856 pid = lwpid_of (get_thread_lwp (current_inferior));
3857 while (regset->size >= 0)
3858 {
3859 void *buf, *data;
3860 int nt_type, res;
3861
3862 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3863 {
3864 regset ++;
3865 continue;
3866 }
3867
3868 buf = xmalloc (regset->size);
3869
3870 /* First fill the buffer with the current register set contents,
3871 in case there are any items in the kernel's regset that are
3872 not in gdbserver's regcache. */
3873
3874 nt_type = regset->nt_type;
3875 if (nt_type)
3876 {
3877 iov.iov_base = buf;
3878 iov.iov_len = regset->size;
3879 data = (void *) &iov;
3880 }
3881 else
3882 data = buf;
3883
3884 #ifndef __sparc__
3885 res = ptrace (regset->get_request, pid, nt_type, data);
3886 #else
3887 res = ptrace (regset->get_request, pid, data, nt_type);
3888 #endif
3889
3890 if (res == 0)
3891 {
3892 /* Then overlay our cached registers on that. */
3893 regset->fill_function (regcache, buf);
3894
3895 /* Only now do we write the register set. */
3896 #ifndef __sparc__
3897 res = ptrace (regset->set_request, pid, nt_type, data);
3898 #else
3899 res = ptrace (regset->set_request, pid, data, nt_type);
3900 #endif
3901 }
3902
3903 if (res < 0)
3904 {
3905 if (errno == EIO)
3906 {
3907 /* If we get EIO on a regset, do not try it again for
3908 this process. */
3909 disabled_regsets[regset - target_regsets] = 1;
3910 free (buf);
3911 continue;
3912 }
3913 else if (errno == ESRCH)
3914 {
3915 /* At this point, ESRCH should mean the process is
3916 already gone, in which case we simply ignore attempts
3917 to change its registers. See also the related
3918 comment in linux_resume_one_lwp. */
3919 free (buf);
3920 return 0;
3921 }
3922 else
3923 {
3924 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3925 }
3926 }
3927 else if (regset->type == GENERAL_REGS)
3928 saw_general_regs = 1;
3929 regset ++;
3930 free (buf);
3931 }
3932 if (saw_general_regs)
3933 return 0;
3934 else
3935 return 1;
3937 }
3938
3939 #endif /* HAVE_LINUX_REGSETS */
3940
3941
3942 void
3943 linux_fetch_registers (struct regcache *regcache, int regno)
3944 {
3945 #ifdef HAVE_LINUX_REGSETS
3946 if (regsets_fetch_inferior_registers (regcache) == 0)
3947 return;
3948 #endif
3949 #ifdef HAVE_LINUX_USRREGS
3950 usr_fetch_inferior_registers (regcache, regno);
3951 #endif
3952 }
3953
3954 void
3955 linux_store_registers (struct regcache *regcache, int regno)
3956 {
3957 #ifdef HAVE_LINUX_REGSETS
3958 if (regsets_store_inferior_registers (regcache) == 0)
3959 return;
3960 #endif
3961 #ifdef HAVE_LINUX_USRREGS
3962 usr_store_inferior_registers (regcache, regno);
3963 #endif
3964 }
3965
3966
3967 /* Copy LEN bytes from inferior's memory starting at MEMADDR
3968 to debugger memory starting at MYADDR. */
3969
3970 static int
3971 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
3972 {
3973 register int i;
3974 /* Round starting address down to longword boundary. */
3975 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3976 /* Round ending address up; get number of longwords that makes. */
3977 register int count
3978 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
3979 / sizeof (PTRACE_XFER_TYPE);
3980 /* Allocate buffer of that many longwords. */
3981 register PTRACE_XFER_TYPE *buffer
3982 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3983 int fd;
3984 char filename[64];
3985 int pid = lwpid_of (get_thread_lwp (current_inferior));
3986
3987 /* Try using /proc. Don't bother for short transfers: below three words, the open/read/close overhead exceeds a few ptrace calls. */
3988 if (len >= 3 * sizeof (long))
3989 {
3990 /* We could keep this file open and cache it - possibly one per
3991 thread. That requires some juggling, but is even faster. */
3992 sprintf (filename, "/proc/%d/mem", pid);
3993 fd = open (filename, O_RDONLY | O_LARGEFILE);
3994 if (fd == -1)
3995 goto no_proc;
3996
3997 /* If pread64 is available, use it. It's faster if the kernel
3998 supports it (only one syscall), and it's 64-bit safe even on
3999 32-bit platforms (for instance, SPARC debugging a SPARC64
4000 application). */
4001 #ifdef HAVE_PREAD64
4002 if (pread64 (fd, myaddr, len, memaddr) != len)
4003 #else
4004 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
4005 #endif
4006 {
4007 close (fd);
4008 goto no_proc;
4009 }
4010
4011 close (fd);
4012 return 0;
4013 }
4014
4015 no_proc:
4016 /* Read all the longwords */
4017 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4018 {
4019 errno = 0;
4020 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4021 about coercing an 8 byte integer to a 4 byte pointer. */
4022 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4023 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4024 if (errno)
4025 return errno;
4026 }
4027
4028 /* Copy appropriate bytes out of the buffer. */
4029 memcpy (myaddr,
4030 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4031 len);
4032
4033 return 0;
4034 }
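/* A standalone sketch of the /proc fast path above: read one byte of
   inferior memory, assuming the LWP is already ptrace-stopped (the
   kernel refuses the read otherwise) and that pread64 is available
   (HAVE_PREAD64).  PID and ADDR are placeholders.  */
#if 0
char name[64];
unsigned char byte;
int fd;
sprintf (name, "/proc/%d/mem", pid);
fd = open (name, O_RDONLY | O_LARGEFILE);
if (fd >= 0)
  {
    if (pread64 (fd, &byte, 1, addr) == 1)
      /* BYTE now holds the inferior's memory at ADDR.  */ ;
    close (fd);
  }
#endif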
4035
4036 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4037 memory at MEMADDR. On failure (cannot write to the inferior)
4038 returns the value of errno. */
4039
4040 static int
4041 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4042 {
4043 register int i;
4044 /* Round starting address down to longword boundary. */
4045 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4046 /* Round ending address up; get number of longwords that makes. */
4047 register int count
4048 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
4049 /* Allocate buffer of that many longwords. */
4050 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4051 int pid = lwpid_of (get_thread_lwp (current_inferior));
4052
4053 if (debug_threads)
4054 {
4055 /* Dump up to four bytes. */
4056 unsigned int val = * (unsigned int *) myaddr;
4057 if (len == 1)
4058 val = val & 0xff;
4059 else if (len == 2)
4060 val = val & 0xffff;
4061 else if (len == 3)
4062 val = val & 0xffffff;
4063 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4064 val, (long)memaddr);
4065 }
4066
4067 /* Fill start and end extra bytes of buffer with existing memory data. */
4068
4069 errno = 0;
4070 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4071 about coercing an 8 byte integer to a 4 byte pointer. */
4072 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4073 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4074 if (errno)
4075 return errno;
4076
4077 if (count > 1)
4078 {
4079 errno = 0;
4080 buffer[count - 1]
4081 = ptrace (PTRACE_PEEKTEXT, pid,
4082 /* Coerce to a uintptr_t first to avoid potential gcc warning
4083 about coercing an 8 byte integer to a 4 byte pointer. */
4084 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4085 * sizeof (PTRACE_XFER_TYPE)),
4086 0);
4087 if (errno)
4088 return errno;
4089 }
4090
4091 /* Copy data to be written over corresponding part of buffer. */
4092
4093 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
4094
4095 /* Write the entire buffer. */
4096
4097 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4098 {
4099 errno = 0;
4100 ptrace (PTRACE_POKETEXT, pid,
4101 /* Coerce to a uintptr_t first to avoid potential gcc warning
4102 about coercing an 8 byte integer to a 4 byte pointer. */
4103 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4104 (PTRACE_ARG4_TYPE) buffer[i]);
4105 if (errno)
4106 return errno;
4107 }
4108
4109 return 0;
4110 }
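/* Worked example of the read-modify-write above: writing one byte at
   memaddr 0x1003 with 4-byte PTRACE_XFER_TYPE words rounds ADDR down
   to 0x1000 and COUNT to 1.  buffer[0] is PEEKed from 0x1000, the new
   byte lands at offset 0x1003 & 3 == 3 within it, and the whole word
   is POKEd back, preserving the other three bytes.  */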
4111
4112 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4113 static int linux_supports_tracefork_flag;
4114
4115 static void
4116 linux_enable_event_reporting (int pid)
4117 {
4118 if (!linux_supports_tracefork_flag)
4119 return;
4120
4121 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4122 }
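/* With PTRACE_O_TRACECLONE set, a clone in the child shows up as a
   SIGTRAP stop whose event code sits in the high bits of the wait
   status, and the new LWP's id is retrieved with PTRACE_GETEVENTMSG.
   A sketch, with PID and STATUS (from waitpid) as placeholders:  */
#if 0
if (WIFSTOPPED (status)
    && WSTOPSIG (status) == SIGTRAP
    && (status >> 16) == PTRACE_EVENT_CLONE)
  {
    unsigned long new_lwp;
    ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_lwp);
    /* NEW_LWP is the thread id of the freshly cloned LWP.  */
  }
#endif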
4123
4124 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4125
4126 static int
4127 linux_tracefork_grandchild (void *arg)
4128 {
4129 _exit (0);
4130 }
4131
4132 #define STACK_SIZE 4096
4133
4134 static int
4135 linux_tracefork_child (void *arg)
4136 {
4137 ptrace (PTRACE_TRACEME, 0, 0, 0);
4138 kill (getpid (), SIGSTOP);
4139
4140 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4141
4142 if (fork () == 0)
4143 linux_tracefork_grandchild (NULL);
4144
4145 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4146
4147 #ifdef __ia64__
4148 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4149 CLONE_VM | SIGCHLD, NULL);
4150 #else
4151 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
4152 CLONE_VM | SIGCHLD, NULL);
4153 #endif
4154
4155 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4156
4157 _exit (0);
4158 }
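/* Note on the clone call above: on the stack-grows-down targets this
   nommu path supports, clone expects the topmost usable address of
   the child's stack, hence ARG + STACK_SIZE; ia64's __clone2 instead
   takes the stack base and its size.  */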
4159
4160 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4161 sure that we can enable the option, and that it had the desired
4162 effect. */
4163
4164 static void
4165 linux_test_for_tracefork (void)
4166 {
4167 int child_pid, ret, status;
4168 long second_pid;
4169 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4170 char *stack = xmalloc (STACK_SIZE * 4);
4171 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4172
4173 linux_supports_tracefork_flag = 0;
4174
4175 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4176
4177 child_pid = fork ();
4178 if (child_pid == 0)
4179 linux_tracefork_child (NULL);
4180
4181 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4182
4183 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4184 #ifdef __ia64__
4185 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4186 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4187 #else /* !__ia64__ */
4188 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4189 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4190 #endif /* !__ia64__ */
4191
4192 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4193
4194 if (child_pid == -1)
4195 perror_with_name ("clone");
4196
4197 ret = my_waitpid (child_pid, &status, 0);
4198 if (ret == -1)
4199 perror_with_name ("waitpid");
4200 else if (ret != child_pid)
4201 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4202 if (! WIFSTOPPED (status))
4203 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4204
4205 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4206 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4207 if (ret != 0)
4208 {
4209 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4210 if (ret != 0)
4211 {
4212 warning ("linux_test_for_tracefork: failed to kill child");
4213 return;
4214 }
4215
4216 ret = my_waitpid (child_pid, &status, 0);
4217 if (ret != child_pid)
4218 warning ("linux_test_for_tracefork: failed to wait for killed child");
4219 else if (!WIFSIGNALED (status))
4220 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4221 "killed child", status);
4222
4223 return;
4224 }
4225
4226 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4227 if (ret != 0)
4228 warning ("linux_test_for_tracefork: failed to resume child");
4229
4230 ret = my_waitpid (child_pid, &status, 0);
4231
4232 if (ret == child_pid && WIFSTOPPED (status)
4233 && status >> 16 == PTRACE_EVENT_FORK)
4234 {
4235 second_pid = 0;
4236 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4237 if (ret == 0 && second_pid != 0)
4238 {
4239 int second_status;
4240
4241 linux_supports_tracefork_flag = 1;
4242 my_waitpid (second_pid, &second_status, 0);
4243 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4244 if (ret != 0)
4245 warning ("linux_test_for_tracefork: failed to kill second child");
4246 my_waitpid (second_pid, &status, 0);
4247 }
4248 }
4249 else
4250 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4251 "(%d, status 0x%x)", ret, status);
4252
4253 do
4254 {
4255 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4256 if (ret != 0)
4257 warning ("linux_test_for_tracefork: failed to kill child");
4258 my_waitpid (child_pid, &status, 0);
4259 }
4260 while (WIFSTOPPED (status));
4261
4262 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4263 free (stack);
4264 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4265 }
4266
4267
4268 static void
4269 linux_look_up_symbols (void)
4270 {
4271 #ifdef USE_THREAD_DB
4272 struct process_info *proc = current_process ();
4273
4274 if (proc->private->thread_db != NULL)
4275 return;
4276
4277 /* If the kernel supports tracing forks then it also supports tracing
4278 clones, and then we don't need to use the magic thread event breakpoint
4279 to learn about threads. */
4280 thread_db_init (!linux_supports_tracefork_flag);
4281 #endif
4282 }
4283
4284 static void
4285 linux_request_interrupt (void)
4286 {
4287 extern unsigned long signal_pid;
4288
4289 if (!ptid_equal (cont_thread, null_ptid)
4290 && !ptid_equal (cont_thread, minus_one_ptid))
4291 {
4292 struct lwp_info *lwp;
4293 int lwpid;
4294
4295 lwp = get_thread_lwp (current_inferior);
4296 lwpid = lwpid_of (lwp);
4297 kill_lwp (lwpid, SIGINT);
4298 }
4299 else
4300 kill_lwp (signal_pid, SIGINT);
4301 }
4302
4303 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4304 to debugger memory starting at MYADDR. */
4305
4306 static int
4307 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4308 {
4309 char filename[PATH_MAX];
4310 int fd, n;
4311 int pid = lwpid_of (get_thread_lwp (current_inferior));
4312
4313 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4314
4315 fd = open (filename, O_RDONLY);
4316 if (fd < 0)
4317 return -1;
4318
4319 if (offset != (CORE_ADDR) 0
4320 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4321 n = -1;
4322 else
4323 n = read (fd, myaddr, len);
4324
4325 close (fd);
4326
4327 return n;
4328 }
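/* The bytes read above form an array of (type, value) pairs in the
   inferior's word size, terminated by an AT_NULL entry.  A sketch of
   walking them for a same-word-size inferior; AUX stands for the
   buffer filled in by the function above.  */
#if 0
#include <link.h>                /* ElfW */
ElfW(auxv_t) *av;
for (av = (ElfW(auxv_t) *) aux; av->a_type != AT_NULL; av++)
  if (av->a_type == AT_ENTRY)
    /* av->a_un.a_val is the program's entry point.  */ ;
#endif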
4329
4330 /* These breakpoint and watchpoint related wrapper functions simply
4331 pass on the function call if the target has registered a
4332 corresponding function. */
4333
4334 static int
4335 linux_insert_point (char type, CORE_ADDR addr, int len)
4336 {
4337 if (the_low_target.insert_point != NULL)
4338 return the_low_target.insert_point (type, addr, len);
4339 else
4340 /* Unsupported (see target.h). */
4341 return 1;
4342 }
4343
4344 static int
4345 linux_remove_point (char type, CORE_ADDR addr, int len)
4346 {
4347 if (the_low_target.remove_point != NULL)
4348 return the_low_target.remove_point (type, addr, len);
4349 else
4350 /* Unsupported (see target.h). */
4351 return 1;
4352 }
4353
4354 static int
4355 linux_stopped_by_watchpoint (void)
4356 {
4357 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4358
4359 return lwp->stopped_by_watchpoint;
4360 }
4361
4362 static CORE_ADDR
4363 linux_stopped_data_address (void)
4364 {
4365 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4366
4367 return lwp->stopped_data_address;
4368 }
4369
4370 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4371 #if defined(__mcoldfire__)
4372 /* These should really be defined in the kernel's ptrace.h header. */
4373 #define PT_TEXT_ADDR 49*4
4374 #define PT_DATA_ADDR 50*4
4375 #define PT_TEXT_END_ADDR 51*4
4376 #endif
4377
4378 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4379 to tell gdb about. */
4380
4381 static int
4382 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4383 {
4384 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4385 unsigned long text, text_end, data;
4386 int pid = lwpid_of (get_thread_lwp (current_inferior));
4387
4388 errno = 0;
4389
4390 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4391 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4392 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4393
4394 if (errno == 0)
4395 {
4396 /* Both text and data offsets produced at compile-time (and so
4397 used by gdb) are relative to the beginning of the program,
4398 with the data segment immediately following the text segment.
4399 However, the actual runtime layout in memory may put the data
4400 somewhere else, so when we send gdb a data base-address, we
4401 use the real data base address and subtract the compile-time
4402 data base-address from it (which is just the length of the
4403 text segment). BSS immediately follows data in both
4404 cases. */
4405 *text_p = text;
4406 *data_p = data - (text_end - text);
4407
4408 return 1;
4409 }
4410 #endif
4411 return 0;
4412 }
4413 #endif
4414
4415 static int
4416 compare_ints (const void *xa, const void *xb)
4417 {
4418 int a = *(const int *)xa;
4419 int b = *(const int *)xb;
4420
4421 return a - b;
4422 }
4423
4424 static int *
4425 unique (int *b, int *e)
4426 {
4427 int *d = b;
4428 while (++b != e)
4429 if (*d != *b)
4430 *++d = *b;
4431 return ++d;
4432 }
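/* Usage sketch for the two helpers above: sort, then compact in
   place; E ends up one past the last distinct element.  */
#if 0
int v[] = { 3, 1, 3, 2, 1 };
int n = sizeof (v) / sizeof (v[0]);
int *e;
qsort (v, n, sizeof (int), compare_ints);  /* 1 1 2 3 3 */
e = unique (v, v + n);                     /* 1 2 3; e - v == 3 */
#endif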
4433
4434 /* Given PID, iterate over all threads in that process.
4435
4436 Information about each thread, in a format suitable for qXfer:osdata:thread
4437 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
4438 initialized, and the caller is responsible for finishing and appending '\0'
4439 to it.
4440
4441 The list of cores that threads are running on is assigned to *CORES, if it
4442 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
4443 should free *CORES. */
4444
4445 static void
4446 list_threads (int pid, struct buffer *buffer, char **cores)
4447 {
4448 int count = 0;
4449 int allocated = 10;
4450 int *core_numbers = xmalloc (sizeof (int) * allocated);
4451 char pathname[128];
4452 DIR *dir;
4453 struct dirent *dp;
4454 struct stat statbuf;
4455
4456 sprintf (pathname, "/proc/%d/task", pid);
4457 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
4458 {
4459 dir = opendir (pathname);
4460 if (!dir)
4461 {
4462 free (core_numbers);
4463 return;
4464 }
4465
4466 while ((dp = readdir (dir)) != NULL)
4467 {
4468 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
4469
4470 if (lwp != 0)
4471 {
4472 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
4473
4474 if (core != -1)
4475 {
4476 char s[sizeof ("4294967295")];
4477 sprintf (s, "%u", core);
4478
4479 if (count == allocated)
4480 {
4481 allocated *= 2;
4482 core_numbers = realloc (core_numbers,
4483 sizeof (int) * allocated);
4484 }
4485 core_numbers[count++] = core;
4486 if (buffer)
4487 buffer_xml_printf (buffer,
4488 "<item>"
4489 "<column name=\"pid\">%d</column>"
4490 "<column name=\"tid\">%s</column>"
4491 "<column name=\"core\">%s</column>"
4492 "</item>", pid, dp->d_name, s);
4493 }
4494 else
4495 {
4496 if (buffer)
4497 buffer_xml_printf (buffer,
4498 "<item>"
4499 "<column name=\"pid\">%d</column>"
4500 "<column name=\"tid\">%s</column>"
4501 "</item>", pid, dp->d_name);
4502 }
4503 }
4504 }
4505 }
4506
4507 if (cores)
4508 {
4509 *cores = NULL;
4510 if (count > 0)
4511 {
4512 struct buffer buffer2;
4513 int *b;
4514 int *e;
4515 qsort (core_numbers, count, sizeof (int), compare_ints);
4516
4517 /* Remove duplicates. */
4518 b = core_numbers;
4519 e = unique (b, core_numbers + count);
4520
4521 buffer_init (&buffer2);
4522
4523 for (b = core_numbers; b != e; ++b)
4524 {
4525 char number[sizeof ("4294967295")];
4526 sprintf (number, "%u", *b);
4527 buffer_xml_printf (&buffer2, "%s%s",
4528 (b == core_numbers) ? "" : ",", number);
4529 }
4530 buffer_grow_str0 (&buffer2, "");
4531
4532 *cores = buffer_finish (&buffer2);
4533 }
4534 }
4535 free (core_numbers);
4536 }
4537
4538 static void
4539 show_process (int pid, const char *username, struct buffer *buffer)
4540 {
4541 char pathname[128];
4542 FILE *f;
4543 char cmd[MAXPATHLEN + 1];
4544
4545 sprintf (pathname, "/proc/%d/cmdline", pid);
4546
4547 if ((f = fopen (pathname, "r")) != NULL)
4548 {
4549 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
4550 if (len > 0)
4551 {
4552 char *cores = 0;
4553 int i;
4554 for (i = 0; i < len; i++)
4555 if (cmd[i] == '\0')
4556 cmd[i] = ' ';
4557 cmd[len] = '\0';
4558
4559 buffer_xml_printf (buffer,
4560 "<item>"
4561 "<column name=\"pid\">%d</column>"
4562 "<column name=\"user\">%s</column>"
4563 "<column name=\"command\">%s</column>",
4564 pid,
4565 username,
4566 cmd);
4567
4568 /* This only collects core numbers, and does not print threads. */
4569 list_threads (pid, NULL, &cores);
4570
4571 if (cores)
4572 {
4573 buffer_xml_printf (buffer,
4574 "<column name=\"cores\">%s</column>", cores);
4575 free (cores);
4576 }
4577
4578 buffer_xml_printf (buffer, "</item>");
4579 }
4580 fclose (f);
4581 }
4582 }
4583
4584 static int
4585 linux_qxfer_osdata (const char *annex,
4586 unsigned char *readbuf, unsigned const char *writebuf,
4587 CORE_ADDR offset, int len)
4588 {
4589 /* We take a snapshot of the process list when the object starts to be
4590 read. */
4591 static const char *buf;
4592 static long len_avail = -1;
4593 static struct buffer buffer;
4594 int processes = 0;
4595 int threads = 0;
4596
4597 DIR *dirp;
4598
4599 if (strcmp (annex, "processes") == 0)
4600 processes = 1;
4601 else if (strcmp (annex, "threads") == 0)
4602 threads = 1;
4603 else
4604 return 0;
4605
4606 if (!readbuf || writebuf)
4607 return 0;
4608
4609 if (offset == 0)
4610 {
4611 if (len_avail != -1 && len_avail != 0)
4612 buffer_free (&buffer);
4613 len_avail = 0;
4614 buf = NULL;
4615 buffer_init (&buffer);
4616 if (processes)
4617 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
4618 else if (threads)
4619 buffer_grow_str (&buffer, "<osdata type=\"threads\">");
4620
4621 dirp = opendir ("/proc");
4622 if (dirp)
4623 {
4624 struct dirent *dp;
4625 while ((dp = readdir (dirp)) != NULL)
4626 {
4627 struct stat statbuf;
4628 char procentry[sizeof ("/proc/4294967295")];
4629
4630 if (!isdigit (dp->d_name[0])
4631 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
4632 continue;
4633
4634 sprintf (procentry, "/proc/%s", dp->d_name);
4635 if (stat (procentry, &statbuf) == 0
4636 && S_ISDIR (statbuf.st_mode))
4637 {
4638 int pid = (int) strtoul (dp->d_name, NULL, 10);
4639
4640 if (processes)
4641 {
4642 struct passwd *entry = getpwuid (statbuf.st_uid);
4643 show_process (pid, entry ? entry->pw_name : "?", &buffer);
4644 }
4645 else if (threads)
4646 {
4647 list_threads (pid, &buffer, NULL);
4648 }
4649 }
4650 }
4651
4652 closedir (dirp);
4653 }
4654 buffer_grow_str0 (&buffer, "</osdata>\n");
4655 buf = buffer_finish (&buffer);
4656 len_avail = strlen (buf);
4657 }
4658
4659 if (offset >= len_avail)
4660 {
4661 /* Done. Get rid of the data. */
4662 buffer_free (&buffer);
4663 buf = NULL;
4664 len_avail = 0;
4665 return 0;
4666 }
4667
4668 if (len > len_avail - offset)
4669 len = len_avail - offset;
4670 memcpy (readbuf, buf + offset, len);
4671
4672 return len;
4673 }
4674
4675 /* Convert a native/host siginfo object, into/from the siginfo in the
4676 layout of the inferiors' architecture. */
4677
4678 static void
4679 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4680 {
4681 int done = 0;
4682
4683 if (the_low_target.siginfo_fixup != NULL)
4684 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4685
4686 /* If there was no callback, or the callback didn't do anything,
4687 then just do a straight memcpy. */
4688 if (!done)
4689 {
4690 if (direction == 1)
4691 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4692 else
4693 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4694 }
4695 }
4696
4697 static int
4698 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4699 unsigned const char *writebuf, CORE_ADDR offset, int len)
4700 {
4701 int pid;
4702 struct siginfo siginfo;
4703 char inf_siginfo[sizeof (struct siginfo)];
4704
4705 if (current_inferior == NULL)
4706 return -1;
4707
4708 pid = lwpid_of (get_thread_lwp (current_inferior));
4709
4710 if (debug_threads)
4711 fprintf (stderr, "%s siginfo for lwp %d.\n",
4712 readbuf != NULL ? "Reading" : "Writing",
4713 pid);
4714
4715 if (offset > sizeof (siginfo))
4716 return -1;
4717
4718 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4719 return -1;
4720
4721 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4722 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4723 inferior with a 64-bit GDBSERVER should look the same as debugging it
4724 with a 32-bit GDBSERVER, we need to convert it. */
4725 siginfo_fixup (&siginfo, inf_siginfo, 0);
4726
4727 if (offset + len > sizeof (siginfo))
4728 len = sizeof (siginfo) - offset;
4729
4730 if (readbuf != NULL)
4731 memcpy (readbuf, inf_siginfo + offset, len);
4732 else
4733 {
4734 memcpy (inf_siginfo + offset, writebuf, len);
4735
4736 /* Convert back to ptrace layout before flushing it out. */
4737 siginfo_fixup (&siginfo, inf_siginfo, 1);
4738
4739 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4740 return -1;
4741 }
4742
4743 return len;
4744 }
4745
4746 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4747 it lets us notice when children change state; and it acts as the
4748 handler for the sigsuspend in my_waitpid. */
4749
4750 static void
4751 sigchld_handler (int signo)
4752 {
4753 int old_errno = errno;
4754
4755 if (debug_threads)
4756 {
4757 do
4758 {
4759 /* fprintf is not async-signal-safe, so call write
4760 directly. */
4761 if (write (2, "sigchld_handler\n",
4762 sizeof ("sigchld_handler\n") - 1) < 0)
4763 break; /* just ignore */
4764 } while (0);
4765 }
4766
4767 if (target_is_async_p ())
4768 async_file_mark (); /* trigger a linux_wait */
4769
4770 errno = old_errno;
4771 }
4772
4773 static int
4774 linux_supports_non_stop (void)
4775 {
4776 return 1;
4777 }
4778
4779 static int
4780 linux_async (int enable)
4781 {
4782 int previous = (linux_event_pipe[0] != -1);
4783
4784 if (debug_threads)
4785 fprintf (stderr, "linux_async (%d), previous=%d\n",
4786 enable, previous);
4787
4788 if (previous != enable)
4789 {
4790 sigset_t mask;
4791 sigemptyset (&mask);
4792 sigaddset (&mask, SIGCHLD);
4793
4794 sigprocmask (SIG_BLOCK, &mask, NULL);
4795
4796 if (enable)
4797 {
4798 if (pipe (linux_event_pipe) == -1)
4799 fatal ("creating event pipe failed.");
4800
4801 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4802 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4803
4804 /* Register the event loop handler. */
4805 add_file_handler (linux_event_pipe[0],
4806 handle_target_event, NULL);
4807
4808 /* Always trigger a linux_wait. */
4809 async_file_mark ();
4810 }
4811 else
4812 {
4813 delete_file_handler (linux_event_pipe[0]);
4814
4815 close (linux_event_pipe[0]);
4816 close (linux_event_pipe[1]);
4817 linux_event_pipe[0] = -1;
4818 linux_event_pipe[1] = -1;
4819 }
4820
4821 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4822 }
4823
4824 return previous;
4825 }
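/* The pipe registered above is the classic self-pipe trick: the
   SIGCHLD handler writes a byte (via async_file_mark) to the write
   end, and the event loop's wait on the read end wakes up and calls
   handle_target_event.  Marking amounts to something like the sketch
   below (not the exact definition of async_file_mark):  */
#if 0
if (write (linux_event_pipe[1], "+", 1) < 0)
  ; /* Pipe full: a wakeup is already pending, so nothing is lost.  */
#endif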
4826
4827 static int
4828 linux_start_non_stop (int nonstop)
4829 {
4830 /* Register or unregister from event-loop accordingly. */
4831 linux_async (nonstop);
4832 return 0;
4833 }
4834
4835 static int
4836 linux_supports_multi_process (void)
4837 {
4838 return 1;
4839 }
4840
4841
4842 /* Enumerate spufs IDs for process PID. */
4843 static int
4844 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4845 {
4846 int pos = 0;
4847 int written = 0;
4848 char path[128];
4849 DIR *dir;
4850 struct dirent *entry;
4851
4852 sprintf (path, "/proc/%ld/fd", pid);
4853 dir = opendir (path);
4854 if (!dir)
4855 return -1;
4856
4857 rewinddir (dir);
4858 while ((entry = readdir (dir)) != NULL)
4859 {
4860 struct stat st;
4861 struct statfs stfs;
4862 int fd;
4863
4864 fd = atoi (entry->d_name);
4865 if (!fd)
4866 continue;
4867
4868 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4869 if (stat (path, &st) != 0)
4870 continue;
4871 if (!S_ISDIR (st.st_mode))
4872 continue;
4873
4874 if (statfs (path, &stfs) != 0)
4875 continue;
4876 if (stfs.f_type != SPUFS_MAGIC)
4877 continue;
4878
4879 if (pos >= offset && pos + 4 <= offset + len)
4880 {
4881 *(unsigned int *)(buf + pos - offset) = fd;
4882 written += 4;
4883 }
4884 pos += 4;
4885 }
4886
4887 closedir (dir);
4888 return written;
4889 }
4890
4891 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4892 object type, using the /proc file system. */
4893 static int
4894 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4895 unsigned const char *writebuf,
4896 CORE_ADDR offset, int len)
4897 {
4898 long pid = lwpid_of (get_thread_lwp (current_inferior));
4899 char buf[128];
4900 int fd = 0;
4901 int ret = 0;
4902
4903 if (!writebuf && !readbuf)
4904 return -1;
4905
4906 if (!*annex)
4907 {
4908 if (!readbuf)
4909 return -1;
4910 else
4911 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4912 }
4913
4914 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4915 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
4916 if (fd <= 0)
4917 return -1;
4918
4919 if (offset != 0
4920 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4921 {
4922 close (fd);
4923 return 0;
4924 }
4925
4926 if (writebuf)
4927 ret = write (fd, writebuf, (size_t) len);
4928 else
4929 ret = read (fd, readbuf, (size_t) len);
4930
4931 close (fd);
4932 return ret;
4933 }
4934
4935 static int
4936 linux_core_of_thread (ptid_t ptid)
4937 {
4938 char filename[sizeof ("/proc//task//stat")
4939 + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */
4940 + 1];
4941 FILE *f;
4942 char *content = NULL;
4943 char *p;
4944 char *ts = 0;
4945 int content_read = 0;
4946 int i;
4947 int core;
4948
4949 sprintf (filename, "/proc/%d/task/%ld/stat",
4950 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4951 f = fopen (filename, "r");
4952 if (!f)
4953 return -1;
4954
4955 for (;;)
4956 {
4957 int n;
4958 content = realloc (content, content_read + 1024);
4959 n = fread (content + content_read, 1, 1024, f);
4960 content_read += n;
4961 if (n < 1024)
4962 {
4963 content[content_read] = '\0';
4964 break;
4965 }
4966 }
4967
4968 p = strchr (content, '(');
4969
4970 /* Skip past the ")" that closes the command name. */
4971 if (p != NULL)
4972 p = strchr (p, ')');
4973 if (p != NULL)
4974 p++;
4975
4976 /* If the first field after program name has index 0, then core number is
4977 the field with index 36. There's no constant for that anywhere. */
4978 if (p != NULL)
4979 p = strtok_r (p, " ", &ts);
4980 for (i = 0; p != NULL && i != 36; ++i)
4981 p = strtok_r (NULL, " ", &ts);
4982
4983 if (p == NULL || sscanf (p, "%d", &core) == 0)
4984 core = -1;
4985
4986 free (content);
4987 fclose (f);
4988
4989 return core;
4990 }
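/* Worked example for the parsing above: in a stat line such as
      1234 (a out) S 1 1234 ...
   everything through the closing ")" is skipped first (the command
   name may itself contain spaces), so the state letter is the token
   with index 0 and the CPU last executed on is the token with index
   36, i.e. field 39 of the full line, documented in proc(5).  */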
4991
4992 static void
4993 linux_process_qsupported (const char *query)
4994 {
4995 if (the_low_target.process_qsupported != NULL)
4996 the_low_target.process_qsupported (query);
4997 }
4998
4999 static int
5000 linux_supports_tracepoints (void)
5001 {
5002 if (the_low_target.supports_tracepoints == NULL)
5003 return 0;
5004
5005 return (*the_low_target.supports_tracepoints) ();
5006 }
5007
5008 static CORE_ADDR
5009 linux_read_pc (struct regcache *regcache)
5010 {
5011 if (the_low_target.get_pc == NULL)
5012 return 0;
5013
5014 return (*the_low_target.get_pc) (regcache);
5015 }
5016
5017 static void
5018 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5019 {
5020 gdb_assert (the_low_target.set_pc != NULL);
5021
5022 (*the_low_target.set_pc) (regcache, pc);
5023 }
5024
5025 static int
5026 linux_thread_stopped (struct thread_info *thread)
5027 {
5028 return get_thread_lwp (thread)->stopped;
5029 }
5030
5031 /* This exposes stop-all-threads functionality to other modules. */
5032
5033 static void
5034 linux_pause_all (int freeze)
5035 {
5036 stop_all_lwps (freeze, NULL);
5037 }
5038
5039 /* This exposes unstop-all-threads functionality to other gdbserver
5040 modules. */
5041
5042 static void
5043 linux_unpause_all (int unfreeze)
5044 {
5045 unstop_all_lwps (unfreeze, NULL);
5046 }
5047
5048 static int
5049 linux_prepare_to_access_memory (void)
5050 {
5051 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5052 running LWP. */
5053 if (non_stop)
5054 linux_pause_all (1);
5055 return 0;
5056 }
5057
5058 static void
5059 linux_done_accessing_memory (void)
5060 {
5061 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5062 running LWP. */
5063 if (non_stop)
5064 linux_unpause_all (1);
5065 }
5066
5067 static int
5068 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5069 CORE_ADDR collector,
5070 CORE_ADDR lockaddr,
5071 ULONGEST orig_size,
5072 CORE_ADDR *jump_entry,
5073 unsigned char *jjump_pad_insn,
5074 ULONGEST *jjump_pad_insn_size,
5075 CORE_ADDR *adjusted_insn_addr,
5076 CORE_ADDR *adjusted_insn_addr_end)
5077 {
5078 return (*the_low_target.install_fast_tracepoint_jump_pad)
5079 (tpoint, tpaddr, collector, lockaddr, orig_size,
5080 jump_entry, jjump_pad_insn, jjump_pad_insn_size,
5081 adjusted_insn_addr, adjusted_insn_addr_end);
5082 }
5083
5084 static struct emit_ops *
5085 linux_emit_ops (void)
5086 {
5087 if (the_low_target.emit_ops != NULL)
5088 return (*the_low_target.emit_ops) ();
5089 else
5090 return NULL;
5091 }
5092
5093 static struct target_ops linux_target_ops = {
5094 linux_create_inferior,
5095 linux_attach,
5096 linux_kill,
5097 linux_detach,
5098 linux_mourn,
5099 linux_join,
5100 linux_thread_alive,
5101 linux_resume,
5102 linux_wait,
5103 linux_fetch_registers,
5104 linux_store_registers,
5105 linux_prepare_to_access_memory,
5106 linux_done_accessing_memory,
5107 linux_read_memory,
5108 linux_write_memory,
5109 linux_look_up_symbols,
5110 linux_request_interrupt,
5111 linux_read_auxv,
5112 linux_insert_point,
5113 linux_remove_point,
5114 linux_stopped_by_watchpoint,
5115 linux_stopped_data_address,
5116 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5117 linux_read_offsets,
5118 #else
5119 NULL,
5120 #endif
5121 #ifdef USE_THREAD_DB
5122 thread_db_get_tls_address,
5123 #else
5124 NULL,
5125 #endif
5126 linux_qxfer_spu,
5127 hostio_last_error_from_errno,
5128 linux_qxfer_osdata,
5129 linux_xfer_siginfo,
5130 linux_supports_non_stop,
5131 linux_async,
5132 linux_start_non_stop,
5133 linux_supports_multi_process,
5134 #ifdef USE_THREAD_DB
5135 thread_db_handle_monitor_command,
5136 #else
5137 NULL,
5138 #endif
5139 linux_core_of_thread,
5140 linux_process_qsupported,
5141 linux_supports_tracepoints,
5142 linux_read_pc,
5143 linux_write_pc,
5144 linux_thread_stopped,
5145 NULL,
5146 linux_pause_all,
5147 linux_unpause_all,
5148 linux_cancel_breakpoints,
5149 linux_stabilize_threads,
5150 linux_install_fast_tracepoint_jump_pad,
5151 linux_emit_ops
5152 };
5153
5154 static void
5155 linux_init_signals (void)
5156 {
5157 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5158 to find what the cancel signal actually is. */
5159 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5160 signal (__SIGRTMIN+1, SIG_IGN);
5161 #endif
5162 }
5163
5164 void
5165 initialize_low (void)
5166 {
5167 struct sigaction sigchld_action;
5168 memset (&sigchld_action, 0, sizeof (sigchld_action));
5169 set_target_ops (&linux_target_ops);
5170 set_breakpoint_data (the_low_target.breakpoint,
5171 the_low_target.breakpoint_len);
5172 linux_init_signals ();
5173 linux_test_for_tracefork ();
5174 #ifdef HAVE_LINUX_REGSETS
5175 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5176 ;
5177 disabled_regsets = xmalloc (num_regsets);
5178 #endif
5179
5180 sigchld_action.sa_handler = sigchld_handler;
5181 sigemptyset (&sigchld_action.sa_mask);
5182 sigchld_action.sa_flags = SA_RESTART;
5183 sigaction (SIGCHLD, &sigchld_action, NULL);
5184 }