/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifndef PTRACE_GETSIGINFO
# define PTRACE_GETSIGINFO 0x4202
# define PTRACE_SETSIGINFO 0x4203
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the normal
   values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201

/* Options set using PTRACE_SETOPTIONS.  */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
#define PTRACE_O_TRACEVFORK 0x00000004
#define PTRACE_O_TRACECLONE 0x00000008
#define PTRACE_O_TRACEEXEC 0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT 0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6

#endif /* PTRACE_EVENT_FORK */
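
/* Illustrative sketch (not how this file consumes the codes, but how
   they surface): the kernel reports an extended event as a SIGTRAP
   stop with the event number packed into the upper half of the wait
   status, so WIFSTOPPED and WSTOPSIG still see a plain SIGTRAP:

       int status;
       waitpid (child, &status, __WALL);
       if (WIFSTOPPED (status)
           && WSTOPSIG (status) == SIGTRAP
           && (status >> 16) == PTRACE_EVENT_CLONE)
         {
           unsigned long new_lwp;
           ptrace (PTRACE_GETEVENTMSG, child, 0, &new_lwp);
         }

   linux_wait_for_event_1 and handle_extended_wait below perform
   exactly these checks.  */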

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   in some contexts.  */
#ifndef __WALL
#define __WALL 0x40000000 /* Wait for any child.  */
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
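
/* Illustrative sketch: W_STOPCODE builds a wait status that reads
   back as a stop with the given signal, e.g. W_STOPCODE (SIGSTOP)
   is 0x137f on Linux, and:

       int status = W_STOPCODE (SIGSTOP);
       assert (WIFSTOPPED (status));
       assert (WSTOPSIG (status) == SIGSTOP);

   handle_extended_wait below uses it to synthesize the pending
   SIGSTOP status of a brand-new clone child.  */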

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some other
   process will presumably claim each of these as forked children
   momentarily.  */

struct inferior_list stopped_pids;

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (void);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int linux_core_of_thread (ptid_t ptid);
static void proceed_all_lwps (void);
static void unstop_all_lwps (struct lwp_info *except);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct inferior_list_entry *entry);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Accepts an integer PID; returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);
  if (readlink (name1, name2, MAXPATHLEN) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}
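
/* Illustrative usage sketch for the function above; the caller owns
   the returned buffer:

       char *path = linux_child_pid_to_exec_file (pid);
       fprintf (stderr, "inferior %d runs %s\n", pid, path);
       free (path);

   Note the fallback: if /proc/PID/exe cannot be read, the unresolved
   "/proc/PID/exe" string itself is returned rather than NULL.  */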

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
          && header->e_ident[EI_MAG1] == ELFMAG1
          && header->e_ident[EI_MAG2] == ELFMAG2
          && header->e_ident[EI_MAG3] == ELFMAG3
          && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}
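
/* Illustrative sketch combining the helpers above to ask whether an
   inferior is a 64-bit process; the -1 "inaccessible" result must be
   distinguished from the 0 "not a 64-bit ELF" result:

       char *exe = linux_child_pid_to_exec_file (pid);
       int is64 = elf_64_file_p (exe);
       if (is64 < 0)
         warning ("could not inspect %s", exe);
       else if (is64)
         ... select the 64-bit register layout ...
       free (exe);
*/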

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Remove a process from the common process list,
   also freeing all private data.  */

static void
linux_remove_process (struct process_info *process)
{
  struct process_info_private *priv = process->private;

  free (priv->arch_private);
  free (priv);
  remove_process (process);
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}
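
/* Illustrative note on the emulation above: with __WALL requested,
   the loop alternates non-blocking waits between the two child
   flavors (plain children, then __WCLONE children), and only sleeps
   in sigsuspend once both flavors have been polled; a SIGCHLD
   arriving while asleep interrupts the sigsuspend and polling
   resumes.  Callers treat the result exactly like waitpid's:

       int status;
       int ret = my_waitpid (-1, &status, __WALL | WNOHANG);
       if (ret > 0 && WIFSTOPPED (status))
         ... a child stopped ...
*/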

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      ptrace (PTRACE_SETOPTIONS, new_pid, 0,
              (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
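
/* Worked example for the adjustment above, using x86 numbers: a
   breakpoint at 0x8048000 replaces the first byte there with int3
   (0xcc).  When the inferior runs into it under PTRACE_CONT, the
   trap is raised after the one-byte int3 executes, so the kernel
   reports $eip == 0x8048001.  With decr_pc_after_break == 1,
   get_stop_pc returns 0x8048000 - the address GDB knows the
   breakpoint by.  After a single-step, a watchpoint hit, or an
   extended event, no adjustment applies, which is what the three
   extra conditions check.  */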

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  lwp->last_resume_kind = resume_continue;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}
/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }
      else
        /* If we fail to attach to a process, report an error.  */
        error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
               strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might not have been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_lwp sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.
        ??? If the process already has several threads we leave the other
        threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

int
linux_attach (unsigned long pid)
{
  struct lwp_info *lwp;

  linux_attach_lwp_1 (pid, 1);

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      lwp = (struct lwp_info *) find_inferior_id (&all_lwps,
                                                  ptid_build (pid, pid, 0));
      lwp->last_resume_kind = resume_stop;
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *) thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill the inferior lwp.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }
  while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  struct thread_info *thread;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));
  thread = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
             lwpid_of (lwp), pid);

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }
  while (lwpid > 0 && WIFSTOPPED (wstat));

#ifdef USE_THREAD_DB
  thread_db_free (process, 0);
#endif
  delete_lwp (lwp);
  linux_remove_process (process);
  return 0;
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If we're detaching from a running inferior, make sure it is
     stopped first, as PTRACE_DETACH will not work otherwise.  */
  if (!lwp->stopped)
    {
      int lwpid = lwpid_of (lwp);

      stopping_threads = 1;
      send_sigstop (&lwp->head);

      /* If this detects a new thread through a clone event, the new
         thread is appended to the end of the lwp list, so we'll
         eventually detach from it.  */
      wait_for_sigstop (&lwp->head);
      stopping_threads = 0;

      /* If LWP exits while we're trying to stop it, there's nothing
         left to do.  */
      lwp = find_lwp_pid (pid_to_ptid (lwpid));
      if (lwp == NULL)
        return 0;
    }

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      if (lwp->stopped)
        linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

static int
any_thread_of (struct inferior_list_entry *entry, void *args)
{
  int *pid_p = args;

  if (ptid_get_pid (entry->id) == *pid_p)
    return 1;

  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

#ifdef USE_THREAD_DB
  thread_db_free (process, 1);
#endif

  current_inferior =
    (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);

  delete_all_breakpoints ();
  find_inferior (&all_threads, linux_detach_one_lwp, &pid);
  linux_remove_process (process);
  return 0;
}

static void
linux_join (int pid)
{
  int status, ret;
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return;

  do
    {
      ret = my_waitpid (pid, &status, 0);
      if (WIFEXITED (status) || WIFSIGNALED (status))
        break;
    }
  while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (lwp->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_STOPPED)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info *) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;                   /* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);  /* this lwp only */

  options |= __WALL;

 retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
       was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: Push back breakpoint for %s\n",
                 target_pid_to_str (lwp->head.id));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (get_lwp_thread (lwp), 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: No breakpoint found at %s for [%s]\n",
                 paddress (lwp->stop_pc),
                 target_pid_to_str (lwp->head.id));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      event_child = (struct lwp_info *)
        find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
        fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (requested_child->status_pending_p)
        event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
                 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
        {
          if (debug_threads)
            fprintf (stderr, "WNOHANG set, no event found\n");
          return 0;
        }

      if (event_child == NULL)
        error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
        {
          if (debug_threads)
            fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

          /* If the last thread is exiting, just return.  */
          if (last_thread_of_process_p (current_inferior))
            {
              if (debug_threads)
                fprintf (stderr, "LWP %ld is last lwp of process\n",
                         lwpid_of (event_child));
              return lwpid_of (event_child);
            }

          if (!non_stop)
            {
              current_inferior = (struct thread_info *) all_threads.head;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now %ld\n",
                         lwpid_of (get_thread_lwp (current_inferior)));
            }
          else
            {
              current_inferior = NULL;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now <NULL>\n");
            }

          /* If we were waiting for this particular child to do something...
             well, it did something.  */
          if (requested_child != NULL)
            {
              int lwpid = lwpid_of (event_child);

              /* Cancel the step-over operation --- the thread that
                 started it is gone.  */
              if (finish_step_over (event_child))
                unstop_all_lwps (event_child);
              delete_lwp (event_child);
              return lwpid;
            }

          delete_lwp (event_child);

          /* Wait for a more interesting event.  */
          continue;
        }

      if (event_child->must_set_ptrace_flags)
        {
          ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
                  0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
          event_child->must_set_ptrace_flags = 0;
        }

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
          && *wstat >> 16 != 0)
        {
          handle_extended_wait (event_child, *wstat);
          continue;
        }

      /* If GDB is not interested in this signal, don't stop other
         threads, and don't report it to GDB.  Just resume the
         inferior right away.  We do this for threading-related
         signals as well as any that GDB specifically requested we
         ignore.  But never ignore SIGSTOP if we sent it ourselves,
         and do not ignore signals when stepping - they may require
         special handling to skip the signal handler.  */
      /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
         thread library?  */
      if (WIFSTOPPED (*wstat)
          && !event_child->stepping
          && (
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
              (current_process ()->private->thread_db != NULL
               && (WSTOPSIG (*wstat) == __SIGRTMIN
                   || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
              ||
#endif
              (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
               && !(WSTOPSIG (*wstat) == SIGSTOP
                    && event_child->stop_expected))))
        {
          siginfo_t info, *info_p;

          if (debug_threads)
            fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
                     WSTOPSIG (*wstat), lwpid_of (event_child));

          if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
            info_p = &info;
          else
            info_p = NULL;
          linux_resume_one_lwp (event_child, event_child->stepping,
                                WSTOPSIG (*wstat), info_p);
          continue;
        }

      if (WIFSTOPPED (*wstat)
          && WSTOPSIG (*wstat) == SIGSTOP
          && event_child->stop_expected)
        {
          int should_stop;

          if (debug_threads)
            fprintf (stderr, "Expected stop.\n");
          event_child->stop_expected = 0;

          should_stop = (event_child->last_resume_kind == resume_stop
                         || stopping_threads);

          if (!should_stop)
            {
              linux_resume_one_lwp (event_child,
                                    event_child->stepping, 0, NULL);
              continue;
            }
        }

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  ptid_t wait_ptid;

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
         with waitpid, so instead, we wait for any child, and leave
         children we're not interested in right now with a pending
         status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  while (1)
    {
      int event_pid;

      event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);

      if (event_pid > 0
          && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
        {
          struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));

          if (! WIFSTOPPED (*wstat))
            mark_lwp_dead (event_child, *wstat);
          else
            {
              event_child->status_pending_p = 1;
              event_child->status_pending = *wstat;
            }
        }
      else
        return event_pid;
    }
}

/* Count the LWPs that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;

  if (get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->last_resume_kind == resume_step
      && lp->status_pending_p)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event that should be
   reported to GDB.  */

static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (lp->last_resume_kind != resume_stop
      && get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (lp->last_resume_kind != resume_stop
      && get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !lp->stepping
      && !lp->stopped_by_watchpoint
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status_pending_p = 0;

  return 0;
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp
    = (struct lwp_info *) find_inferior (&all_lwps,
                                         select_singlestep_lwp_callback,
                                         NULL);
  if (event_lp != NULL)
    {
      if (debug_threads)
        fprintf (stderr,
                 "SEL: Select single-step %s\n",
                 target_pid_to_str (ptid_of (event_lp)));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      find_inferior (&all_lwps, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
        ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
        fprintf (stderr,
                 "SEL: Found %d SIGTRAP events, selecting #%d\n",
                 num_events, random_selector);

      event_lp = (struct lwp_info *) find_inferior (&all_lwps,
                                                    select_event_lwp_callback,
                                                    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
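
/* Note on the selection above: rand () yields a value in
   [0, RAND_MAX], so (num_events * (double) rand ()) / (RAND_MAX + 1.0)
   falls in [0, num_events), and the int cast produces an
   (approximately uniform) index between 0 and num_events - 1.
   select_event_lwp_callback then decrements that index once per
   qualifying LWP and returns the one that reaches zero.  */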

/* Set this inferior LWP's state as "want-stopped".  We won't resume
   this LWP until the client gives us another action for it.  */

static void
gdb_wants_lwp_stopped (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Most threads are stopped implicitly (all-stop); tag that with
     signal 0.  The thread being explicitly reported stopped to the
     client gets its status fixed up afterwards.  */
  thread->last_status.kind = TARGET_WAITKIND_STOPPED;
  thread->last_status.value.sig = TARGET_SIGNAL_0;

  lwp->last_resume_kind = resume_stop;
}

/* Set all LWPs' states as "want-stopped".  */

static void
gdb_wants_all_stopped (void)
{
  for_each_inferior (&all_lwps, gdb_wants_lwp_stopped);
}

/* Wait for process, returns status.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
              struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct thread_info *thread = NULL;
  struct lwp_info *event_child = NULL;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

retry:
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      thread = (struct thread_info *) find_inferior_id (&all_threads,
                                                        cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
        {
          struct thread_resume resume_info;
          resume_info.thread = minus_one_ptid;
          resume_info.kind = resume_continue;
          resume_info.sig = 0;
          linux_resume (&resume_info, 1);
        }
      else
        ptid = cont_thread;
    }

  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
        fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
                 target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  event_child = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
        {
          int pid = pid_of (event_child);
          struct process_info *process = find_process_pid (pid);

#ifdef USE_THREAD_DB
          thread_db_free (process, 0);
#endif
          delete_lwp (event_child);
          linux_remove_process (process);

          current_inferior = NULL;

          if (WIFEXITED (w))
            {
              ourstatus->kind = TARGET_WAITKIND_EXITED;
              ourstatus->value.integer = WEXITSTATUS (w);

              if (debug_threads)
                fprintf (stderr, "\nChild exited with retcode = %x \n",
                         WEXITSTATUS (w));
            }
          else
            {
              ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
              ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

              if (debug_threads)
                fprintf (stderr, "\nChild terminated with signal = %x \n",
                         WTERMSIG (w));
            }

          return pid_to_ptid (pid);
        }
    }
  else
    {
      if (!WIFSTOPPED (w))
        goto retry;
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
           || ((WSTOPSIG (w) == SIGILL
                || WSTOPSIG (w) == SIGSEGV)
               && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
         report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
         breakpoint, or if we should possibly report the event to GDB.
         Do this before anything that may remove or insert a
         breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
         finished.  If so, tweak the state machine accordingly,
         reinsert breakpoints and delete any reinsert (software
         single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      if (bp_explains_trap)
        {
          /* If we stepped or ran into an internal breakpoint, we've
             already handled it.  So next time we resume (from this
             PC), we should step over it.  */
          if (debug_threads)
            fprintf (stderr, "Hit a gdbserver breakpoint.\n");

          if (breakpoint_here (event_child->stop_pc))
            event_child->need_step_over = 1;
        }
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
         progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* Check if GDB would be interested in this event.  If GDB wanted
     this thread to single step, we always want to report the SIGTRAP,
     and let GDB handle it.  Watchpoints should always be reported.
     So should signals we can't explain.  A SIGTRAP we can't explain
     could be a GDB breakpoint --- we may or may not support Z0
     breakpoints.  If we do, we'd be able to handle GDB breakpoints
     on top of internal breakpoints, by handling the internal
     breakpoint and still reporting the event to GDB.  If we don't,
     we're out of luck, GDB won't see the breakpoint hit.  */
  report_to_gdb = (!maybe_internal_trap
                   || event_child->last_resume_kind == resume_step
                   || event_child->stopped_by_watchpoint
                   || (!step_over_finished && !bp_explains_trap)
                   || gdb_breakpoint_here (event_child->stop_pc));

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
        {
          if (bp_explains_trap)
            fprintf (stderr, "Hit a gdbserver breakpoint.\n");
          if (step_over_finished)
            fprintf (stderr, "Step-over finished.\n");
        }

      /* We're not reporting this breakpoint to GDB, so apply the
         decr_pc_after_break adjustment to the inferior's regcache
         ourselves.  */

      if (the_low_target.set_pc != NULL)
        {
          struct regcache *regcache
            = get_thread_regcache (get_lwp_thread (event_child), 1);
          (*the_low_target.set_pc) (regcache, event_child->stop_pc);
        }

      /* We've finished stepping over a breakpoint.  We've stopped all
         LWPs momentarily except the stepping one.  This is where we
         resume them all again.  We're going to keep waiting, so use
         proceed, which handles stepping over the next breakpoint.  */
      if (debug_threads)
        fprintf (stderr, "proceeding all threads.\n");
      proceed_all_lwps ();
      goto retry;
    }

  if (debug_threads)
    {
      if (event_child->last_resume_kind == resume_step)
        fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
      if (event_child->stopped_by_watchpoint)
        fprintf (stderr, "Stopped by watchpoint.\n");
      if (gdb_breakpoint_here (event_child->stop_pc))
        fprintf (stderr, "Stopped by GDB breakpoint.\n");
      fprintf (stderr, "Hit a non-gdbserver trap event.\n");
    }
1813
1814 /* Alright, we're going to report a stop. */
1815
1816 if (!non_stop)
1817 {
1818 /* In all-stop, stop all threads. */
1819 stop_all_lwps ();
1820
1821 /* If we're not waiting for a specific LWP, choose an event LWP
1822 from among those that have had events. Giving equal priority
1823 to all LWPs that have had events helps prevent
1824 starvation. */
1825 if (ptid_equal (ptid, minus_one_ptid))
1826 {
1827 event_child->status_pending_p = 1;
1828 event_child->status_pending = w;
1829
1830 select_event_lwp (&event_child);
1831
1832 event_child->status_pending_p = 0;
1833 w = event_child->status_pending;
1834 }
1835
1836 /* Now that we've selected our final event LWP, cancel any
1837 breakpoints in other LWPs that have hit a GDB breakpoint.
1838 See the comment in cancel_breakpoints_callback to find out
1839 why. */
1840 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
1841 }
1842 else
1843 {
1844 /* If we just finished a step-over, then all threads had been
1845 momentarily paused. In all-stop, that's fine, we want
1846 threads stopped by now anyway. In non-stop, we need to
1847 re-resume threads that GDB wanted to be running. */
1848 if (step_over_finished)
1849 unstop_all_lwps (event_child);
1850 }
1851
1852 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1853
1854 /* Do this before the gdb_wants_all_stopped calls below, since they
1855 always set last_resume_kind to resume_stop. */
1856 if (event_child->last_resume_kind == resume_stop && WSTOPSIG (w) == SIGSTOP)
1857 {
1858 /* A thread that has been requested to stop by GDB with vCont;t,
1859 and it stopped cleanly, so report as SIG0. The use of
1860 SIGSTOP is an implementation detail. */
1861 ourstatus->value.sig = TARGET_SIGNAL_0;
1862 }
1863 else if (event_child->last_resume_kind == resume_stop && WSTOPSIG (w) != SIGSTOP)
1864 {
1865 /* A thread that has been requested to stop by GDB with vCont;t,
1866 but, it stopped for other reasons. */
1867 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1868 }
1869 else
1870 {
1871 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1872 }
1873
1874 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
1875
1876 if (!non_stop)
1877 {
1878 /* From GDB's perspective, all-stop mode always stops all
1879 threads implicitly. Tag all threads as "want-stopped". */
1880 gdb_wants_all_stopped ();
1881 }
1882 else
1883 {
1884 /* We're reporting this LWP as stopped. Update its
1885 "want-stopped" state to what the client wants, until it gets
1886 a new resume action. */
1887 gdb_wants_lwp_stopped (&event_child->head);
1888 }
1889
1890 if (debug_threads)
1891 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
1892 target_pid_to_str (ptid_of (event_child)),
1893 ourstatus->kind,
1894 ourstatus->value.sig);
1895
1896 get_lwp_thread (event_child)->last_status = *ourstatus;
1897 return ptid_of (event_child);
1898 }
1899
1900 /* Get rid of any pending event in the pipe. */
1901 static void
1902 async_file_flush (void)
1903 {
1904 int ret;
1905 char buf;
1906
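/* The event pipe is set O_NONBLOCK when async mode is enabled (see
   linux_async below), so this loop drains any queued bytes and then
   stops on EAGAIN instead of blocking. */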
1907 do
1908 ret = read (linux_event_pipe[0], &buf, 1);
1909 while (ret >= 0 || (ret == -1 && errno == EINTR));
1910 }
1911
1912 /* Put something in the pipe, so the event loop wakes up. */
1913 static void
1914 async_file_mark (void)
1915 {
1916 int ret;
1917
1918 async_file_flush ();
1919
1920 do
1921 ret = write (linux_event_pipe[1], "+", 1);
1922 while (ret == 0 || (ret == -1 && errno == EINTR));
1923
1924 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1925 be awakened anyway. */
1926 }
1927
1928 static ptid_t
1929 linux_wait (ptid_t ptid,
1930 struct target_waitstatus *ourstatus, int target_options)
1931 {
1932 ptid_t event_ptid;
1933
1934 if (debug_threads)
1935 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
1936
1937 /* Flush the async file first. */
1938 if (target_is_async_p ())
1939 async_file_flush ();
1940
1941 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
1942
1943 /* If at least one stop was reported, there may be more. A single
1944 SIGCHLD can signal more than one child stop. */
1945 if (target_is_async_p ()
1946 && (target_options & TARGET_WNOHANG) != 0
1947 && !ptid_equal (event_ptid, null_ptid))
1948 async_file_mark ();
1949
1950 return event_ptid;
1951 }
1952
1953 /* Send a signal to an LWP. */
1954
1955 static int
1956 kill_lwp (unsigned long lwpid, int signo)
1957 {
1958 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1959 fails, then we are not using nptl threads and we should be using kill. */
1960
1961 #ifdef __NR_tkill
1962 {
1963 static int tkill_failed;
1964
1965 if (!tkill_failed)
1966 {
1967 int ret;
1968
1969 errno = 0;
1970 ret = syscall (__NR_tkill, lwpid, signo);
1971 if (errno != ENOSYS)
1972 return ret;
1973 tkill_failed = 1;
1974 }
1975 }
1976 #endif
1977
1978 return kill (lwpid, signo);
1979 }
1980
1981 static void
1982 send_sigstop (struct inferior_list_entry *entry)
1983 {
1984 struct lwp_info *lwp = (struct lwp_info *) entry;
1985 int pid;
1986
1987 if (lwp->stopped)
1988 return;
1989
1990 pid = lwpid_of (lwp);
1991
1992 /* If we already have a pending stop signal for this LWP, don't
1993 send another. */
1994 if (lwp->stop_expected)
1995 {
1996 if (debug_threads)
1997 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
1998
1999 return;
2000 }
2001
2002 if (debug_threads)
2003 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2004
2005 lwp->stop_expected = 1;
2006 kill_lwp (pid, SIGSTOP);
2007 }
2008
2009 static void
2010 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2011 {
2012 /* It's dead, really. */
2013 lwp->dead = 1;
2014
2015 /* Store the exit status for later. */
2016 lwp->status_pending_p = 1;
2017 lwp->status_pending = wstat;
2018
2019 /* Prevent trying to stop it. */
2020 lwp->stopped = 1;
2021
2022 /* No further stops are expected from a dead lwp. */
2023 lwp->stop_expected = 0;
2024 }
2025
2026 static void
2027 wait_for_sigstop (struct inferior_list_entry *entry)
2028 {
2029 struct lwp_info *lwp = (struct lwp_info *) entry;
2030 struct thread_info *saved_inferior;
2031 int wstat;
2032 ptid_t saved_tid;
2033 ptid_t ptid;
2034 int pid;
2035
2036 if (lwp->stopped)
2037 {
2038 if (debug_threads)
2039 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2040 lwpid_of (lwp));
2041 return;
2042 }
2043
2044 saved_inferior = current_inferior;
2045 if (saved_inferior != NULL)
2046 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2047 else
2048 saved_tid = null_ptid; /* avoid bogus unused warning */
2049
2050 ptid = lwp->head.id;
2051
2052 if (debug_threads)
2053 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2054
2055 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2056
2057 /* If we stopped with a non-SIGSTOP signal, save it for later
2058 and record the pending SIGSTOP. If the process exited, just
2059 return. */
2060 if (WIFSTOPPED (wstat))
2061 {
2062 if (debug_threads)
2063 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2064 lwpid_of (lwp), WSTOPSIG (wstat));
2065
2066 if (WSTOPSIG (wstat) != SIGSTOP)
2067 {
2068 if (debug_threads)
2069 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2070 lwpid_of (lwp), wstat);
2071
2072 lwp->status_pending_p = 1;
2073 lwp->status_pending = wstat;
2074 }
2075 }
2076 else
2077 {
2078 if (debug_threads)
2079 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2080
2081 lwp = find_lwp_pid (pid_to_ptid (pid));
2082 if (lwp)
2083 {
2084 /* Leave this status pending for the next time we're able to
2085 report it. In the mean time, we'll report this lwp as
2086 dead to GDB, so GDB doesn't try to read registers and
2087 memory from it. This can only happen if this was the
2088 last thread of the process; otherwise, PID is removed
2089 from the thread tables before linux_wait_for_event
2090 returns. */
2091 mark_lwp_dead (lwp, wstat);
2092 }
2093 }
2094
2095 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2096 current_inferior = saved_inferior;
2097 else
2098 {
2099 if (debug_threads)
2100 fprintf (stderr, "Previously current thread died.\n");
2101
2102 if (non_stop)
2103 {
2104 /* We can't change the current inferior behind GDB's back;
2105 otherwise, a subsequent command may apply to the wrong
2106 process. */
2107 current_inferior = NULL;
2108 }
2109 else
2110 {
2111 /* Set a valid thread as current. */
2112 set_desired_inferior (0);
2113 }
2114 }
2115 }
2116
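/* Stop all LWPs: send each one a SIGSTOP, then wait until every one
   of them has reported its stop. STOPPING_THREADS is set for the
   duration. */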
2117 static void
2118 stop_all_lwps (void)
2119 {
2120 stopping_threads = 1;
2121 for_each_inferior (&all_lwps, send_sigstop);
2122 for_each_inferior (&all_lwps, wait_for_sigstop);
2123 stopping_threads = 0;
2124 }
2125
2126 /* Resume execution of the inferior process.
2127 If STEP is nonzero, single-step it.
2128 If SIGNAL is nonzero, give it that signal. */
2129
2130 static void
2131 linux_resume_one_lwp (struct lwp_info *lwp,
2132 int step, int signal, siginfo_t *info)
2133 {
2134 struct thread_info *saved_inferior;
2135
2136 if (lwp->stopped == 0)
2137 return;
2138
2139 /* If we have pending signals or status, and a new signal, enqueue the
2140 signal. Also enqueue the signal if we are waiting to reinsert a
2141 breakpoint; it will be picked up again below. */
2142 if (signal != 0
2143 && (lwp->status_pending_p || lwp->pending_signals != NULL
2144 || lwp->bp_reinsert != 0))
2145 {
2146 struct pending_signals *p_sig;
2147 p_sig = xmalloc (sizeof (*p_sig));
2148 p_sig->prev = lwp->pending_signals;
2149 p_sig->signal = signal;
2150 if (info == NULL)
2151 memset (&p_sig->info, 0, sizeof (siginfo_t));
2152 else
2153 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2154 lwp->pending_signals = p_sig;
2155 }
2156
2157 if (lwp->status_pending_p)
2158 {
2159 if (debug_threads)
2160 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2161 " has pending status\n",
2162 lwpid_of (lwp), step ? "step" : "continue", signal,
2163 lwp->stop_expected ? "expected" : "not expected");
2164 return;
2165 }
2166
2167 saved_inferior = current_inferior;
2168 current_inferior = get_lwp_thread (lwp);
2169
2170 if (debug_threads)
2171 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2172 lwpid_of (lwp), step ? "step" : "continue", signal,
2173 lwp->stop_expected ? "expected" : "not expected");
2174
2175 /* This bit needs some thinking about. If we get a signal that
2176 we must report while a single-step reinsert is still pending,
2177 we often end up resuming the thread. It might be better to
2178 (ew) allow a stack of pending events; then we could be sure that
2179 the reinsert happened right away and not lose any signals.
2180
2181 Making this stack would also shrink the window in which breakpoints are
2182 uninserted (see comment in linux_wait_for_lwp) but not enough for
2183 complete correctness, so it won't solve that problem. It may be
2184 worthwhile just to solve this one, however. */
2185 if (lwp->bp_reinsert != 0)
2186 {
2187 if (debug_threads)
2188 fprintf (stderr, " pending reinsert at 0x%s\n",
2189 paddress (lwp->bp_reinsert));
2190
2191 if (can_hardware_single_step ())
2192 {
2193 if (step == 0)
2194 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2195
2196 step = 1;
2197 }
2198
2199 /* Postpone any pending signal. It was enqueued above. */
2200 signal = 0;
2201 }
2202
2203 if (debug_threads && the_low_target.get_pc != NULL)
2204 {
2205 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2206 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
2207 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
2208 }
2209
2210 /* If we have pending signals, consume one unless we are trying to reinsert
2211 a breakpoint. */
2212 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
2213 {
2214 struct pending_signals **p_sig;
2215
2216 p_sig = &lwp->pending_signals;
2217 while ((*p_sig)->prev != NULL)
2218 p_sig = &(*p_sig)->prev;
2219
2220 signal = (*p_sig)->signal;
2221 if ((*p_sig)->info.si_signo != 0)
2222 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
2223
2224 free (*p_sig);
2225 *p_sig = NULL;
2226 }
2227
2228 if (the_low_target.prepare_to_resume != NULL)
2229 the_low_target.prepare_to_resume (lwp);
2230
2231 regcache_invalidate_one ((struct inferior_list_entry *)
2232 get_lwp_thread (lwp));
2233 errno = 0;
2234 lwp->stopped = 0;
2235 lwp->stopped_by_watchpoint = 0;
2236 lwp->stepping = step;
2237 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
2238 /* Coerce to a uintptr_t first to avoid potential gcc warning
2239 of coercing an 8 byte integer to a 4 byte pointer. */
2240 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
2241
2242 current_inferior = saved_inferior;
2243 if (errno)
2244 {
2245 /* ESRCH from ptrace either means that the thread was already
2246 running (an error) or that it is gone (a race condition). If
2247 it's gone, we will get a notification the next time we wait,
2248 so we can ignore the error. We could differentiate these
2249 two, but it's tricky without waiting; the thread still exists
2250 as a zombie, so sending it signal 0 would succeed. So just
2251 ignore ESRCH. */
2252 if (errno == ESRCH)
2253 return;
2254
2255 perror_with_name ("ptrace");
2256 }
2257 }
2258
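/* The list of resume requests received from GDB, passed to
   linux_set_resume_request via find_inferior. */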
2259 struct thread_resume_array
2260 {
2261 struct thread_resume *resume;
2262 size_t n;
2263 };
2264
2265 /* This function is called once per thread. We look up the thread
2266 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2267 resume request.
2268
2269 This algorithm is O(threads * resume elements), but resume elements
2270 is small (and will remain small at least until GDB supports thread
2271 suspension). */
2272 static int
2273 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
2274 {
2275 struct lwp_info *lwp;
2276 struct thread_info *thread;
2277 int ndx;
2278 struct thread_resume_array *r;
2279
2280 thread = (struct thread_info *) entry;
2281 lwp = get_thread_lwp (thread);
2282 r = arg;
2283
2284 for (ndx = 0; ndx < r->n; ndx++)
2285 {
2286 ptid_t ptid = r->resume[ndx].thread;
2287 if (ptid_equal (ptid, minus_one_ptid)
2288 || ptid_equal (ptid, entry->id)
2289 || (ptid_is_pid (ptid)
2290 && (ptid_get_pid (ptid) == pid_of (lwp)))
2291 || (ptid_get_lwp (ptid) == -1
2292 && (ptid_get_pid (ptid) == pid_of (lwp))))
2293 {
2294 if (r->resume[ndx].kind == resume_stop
2295 && lwp->last_resume_kind == resume_stop)
2296 {
2297 if (debug_threads)
2298 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
2299 thread->last_status.kind == TARGET_WAITKIND_STOPPED
2300 ? "stopped"
2301 : "stopping",
2302 lwpid_of (lwp));
2303
2304 continue;
2305 }
2306
2307 lwp->resume = &r->resume[ndx];
2308 lwp->last_resume_kind = lwp->resume->kind;
2309 return 0;
2310 }
2311 }
2312
2313 /* No resume action for this thread. */
2314 lwp->resume = NULL;
2315
2316 return 0;
2317 }
2318
2319
2320 /* Set *FLAG_P if this lwp has an interesting status pending. */
2321 static int
2322 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
2323 {
2324 struct lwp_info *lwp = (struct lwp_info *) entry;
2325
2326 /* LWPs which will not be resumed are not interesting, because
2327 we might not wait for them next time through linux_wait. */
2328 if (lwp->resume == NULL)
2329 return 0;
2330
2331 if (lwp->status_pending_p)
2332 * (int *) flag_p = 1;
2333
2334 return 0;
2335 }
2336
2337 /* Return 1 if this lwp that GDB wants running is stopped at an
2338 internal breakpoint that we need to step over. It assumes that any
2339 required STOP_PC adjustment has already been propagated to the
2340 inferior's regcache. */
2341
2342 static int
2343 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
2344 {
2345 struct lwp_info *lwp = (struct lwp_info *) entry;
2346 struct thread_info *saved_inferior;
2347 CORE_ADDR pc;
2348
2349 /* LWPs which will not be resumed are not interesting, because we
2350 might not wait for them next time through linux_wait. */
2351
2352 if (!lwp->stopped)
2353 {
2354 if (debug_threads)
2355 fprintf (stderr,
2356 "Need step over [LWP %ld]? Ignoring, not stopped\n",
2357 lwpid_of (lwp));
2358 return 0;
2359 }
2360
2361 if (lwp->last_resume_kind == resume_stop)
2362 {
2363 if (debug_threads)
2364 fprintf (stderr,
2365 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
2366 lwpid_of (lwp));
2367 return 0;
2368 }
2369
2370 if (!lwp->need_step_over)
2371 {
2372 if (debug_threads)
2373 fprintf (stderr,
2374 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
return 0;
2375 }
2376
2377 if (lwp->status_pending_p)
2378 {
2379 if (debug_threads)
2380 fprintf (stderr,
2381 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
2382 lwpid_of (lwp));
2383 return 0;
2384 }
2385
2386 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
2387 or we have. */
2388 pc = get_pc (lwp);
2389
2390 /* If the PC has changed since we stopped, then don't do anything,
2391 and let the breakpoint/tracepoint be hit. This happens if, for
2392 instance, GDB handled the decr_pc_after_break subtraction itself,
2393 GDB is OOL stepping this thread, or the user has issued a "jump"
2394 command, or poked the thread's registers herself. */
2395 if (pc != lwp->stop_pc)
2396 {
2397 if (debug_threads)
2398 fprintf (stderr,
2399 "Need step over [LWP %ld]? Cancelling, PC was changed. "
2400 "Old stop_pc was 0x%s, PC is now 0x%s\n",
2401 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
2402
2403 lwp->need_step_over = 0;
2404 return 0;
2405 }
2406
2407 saved_inferior = current_inferior;
2408 current_inferior = get_lwp_thread (lwp);
2409
2410 /* We can only step over breakpoints we know about. */
2411 if (breakpoint_here (pc))
2412 {
2413 /* Don't step over a breakpoint that GDB expects to hit
2414 though. */
2415 if (gdb_breakpoint_here (pc))
2416 {
2417 if (debug_threads)
2418 fprintf (stderr,
2419 "Need step over [LWP %ld]? yes, but found"
2420 " GDB breakpoint at 0x%s; skipping step over\n",
2421 lwpid_of (lwp), paddress (pc));
2422
2423 current_inferior = saved_inferior;
2424 return 0;
2425 }
2426 else
2427 {
2428 if (debug_threads)
2429 fprintf (stderr,
2430 "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
2431 lwpid_of (lwp), paddress (pc));
2432
2433 /* We've found an lwp that needs stepping over --- return 1 so
2434 that find_inferior stops looking. */
2435 current_inferior = saved_inferior;
2436
2437 /* If the step over is cancelled, this is set again. */
2438 lwp->need_step_over = 0;
2439 return 1;
2440 }
2441 }
2442
2443 current_inferior = saved_inferior;
2444
2445 if (debug_threads)
2446 fprintf (stderr,
2447 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
2448 lwpid_of (lwp), paddress (pc));
2449
2450 return 0;
2451 }
2452
2453 /* Start a step-over operation on LWP. When LWP stops at a
2454 breakpoint, we need to get the breakpoint out of the way to make
2455 progress. If we let other threads run while we do that, they may
2456 pass by the breakpoint location and miss hitting it. To avoid
2457 that, a step-over momentarily stops all threads, temporarily
2458 uninserts the breakpoint from the inferior, and single-steps LWP
2459 past it. When the single-step finishes, we reinsert the
2460 breakpoint, and let all threads that are supposed to be running
2461 run again.
2462
2463 On targets that don't support hardware single-step, we don't
2464 currently support full software single-stepping. Instead, we only
2465 support stepping over the thread event breakpoint, by asking the
2466 low target where to place a reinsert breakpoint. Since this
2467 routine assumes the breakpoint being stepped over is a thread
2468 event breakpoint, the return address of the current function is
2469 usually a good enough place to set the reinsert breakpoint. */
2470
2471 static int
2472 start_step_over (struct lwp_info *lwp)
2473 {
2474 struct thread_info *saved_inferior;
2475 CORE_ADDR pc;
2476 int step;
2477
2478 if (debug_threads)
2479 fprintf (stderr,
2480 "Starting step-over on LWP %ld. Stopping all threads\n",
2481 lwpid_of (lwp));
2482
2483 stop_all_lwps ();
2484
2485 if (debug_threads)
2486 fprintf (stderr, "Done stopping all threads for step-over.\n");
2487
2488 /* Note, we should always reach here with an already adjusted PC,
2489 either by GDB (if we're resuming due to GDB's request), or by our
2490 caller, if we just finished handling an internal breakpoint GDB
2491 shouldn't care about. */
2492 pc = get_pc (lwp);
2493
2494 saved_inferior = current_inferior;
2495 current_inferior = get_lwp_thread (lwp);
2496
2497 lwp->bp_reinsert = pc;
2498 uninsert_breakpoints_at (pc);
2499
2500 if (can_hardware_single_step ())
2501 {
2502 step = 1;
2503 }
2504 else
2505 {
2506 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
2507 set_reinsert_breakpoint (raddr);
2508 step = 0;
2509 }
2510
2511 current_inferior = saved_inferior;
2512
2513 linux_resume_one_lwp (lwp, step, 0, NULL);
2514
2515 /* Require next event from this LWP. */
2516 step_over_bkpt = lwp->head.id;
2517 return 1;
2518 }
2519
2520 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
2521 start_step_over, if still there, and delete any reinsert
2522 breakpoints we've set on targets without hardware single-step. */
2523
2524 static int
2525 finish_step_over (struct lwp_info *lwp)
2526 {
2527 if (lwp->bp_reinsert != 0)
2528 {
2529 if (debug_threads)
2530 fprintf (stderr, "Finished step over.\n");
2531
2532 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
2533 may be no breakpoint to reinsert there by now. */
2534 reinsert_breakpoints_at (lwp->bp_reinsert);
2535
2536 lwp->bp_reinsert = 0;
2537
2538 /* Delete any software-single-step reinsert breakpoints. No
2539 longer needed. We don't have to worry about other threads
2540 hitting this trap, and later not being able to explain it,
2541 because we were stepping over a breakpoint, and we hold all
2542 threads but LWP stopped while doing that. */
2543 if (!can_hardware_single_step ())
2544 delete_reinsert_breakpoints ();
2545
2546 step_over_bkpt = null_ptid;
2547 return 1;
2548 }
2549 else
2550 return 0;
2551 }
2552
2553 /* This function is called once per thread. We check the thread's resume
2554 request, which will tell us whether to resume, step, or leave the thread
2555 stopped; and what signal, if any, it should be sent.
2556
2557 For threads which we aren't explicitly told otherwise, we preserve
2558 the stepping flag; this is used for stepping over gdbserver-placed
2559 breakpoints.
2560
2561 If any thread's pending-status flag was set, we queue any needed
2562 signals, since we won't actually resume. We already have a pending
2563 event to report, so we don't need to preserve any step requests;
2564 they should be re-issued if necessary. */
2565
2566 static int
2567 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
2568 {
2569 struct lwp_info *lwp;
2570 struct thread_info *thread;
2571 int step;
2572 int leave_all_stopped = * (int *) arg;
2573 int leave_pending;
2574
2575 thread = (struct thread_info *) entry;
2576 lwp = get_thread_lwp (thread);
2577
2578 if (lwp->resume == NULL)
2579 return 0;
2580
2581 if (lwp->resume->kind == resume_stop)
2582 {
2583 if (debug_threads)
2584 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
2585
2586 if (!lwp->stopped)
2587 {
2588 if (debug_threads)
2589 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
2590
2591 /* Stop the thread, and wait for the event asynchronously,
2592 through the event loop. */
2593 send_sigstop (&lwp->head);
2594 }
2595 else
2596 {
2597 if (debug_threads)
2598 fprintf (stderr, "already stopped LWP %ld\n",
2599 lwpid_of (lwp));
2600
2601 /* The LWP may have been stopped in an internal event that
2602 was not meant to be notified back to GDB (e.g., gdbserver
2603 breakpoint), so we should be reporting a stop event in
2604 this case too. */
2605
2606 /* If the thread already has a pending SIGSTOP, this is a
2607 no-op. Otherwise, something later will presumably resume
2608 the thread and this will cause it to cancel any pending
2609 operation, due to last_resume_kind == resume_stop. If
2610 the thread already has a pending status to report, we
2611 will still report it the next time we wait - see
2612 status_pending_p_callback. */
2613 send_sigstop (&lwp->head);
2614 }
2615
2616 /* For stop requests, we're done. */
2617 lwp->resume = NULL;
2618 get_lwp_thread (lwp)->last_status.kind = TARGET_WAITKIND_IGNORE;
2619 return 0;
2620 }
2621
2622 /* If the thread about to be resumed has a pending status, then
2623 don't resume any threads - we can just report the pending
2624 status. Make sure to queue any signals that would otherwise be
2625 sent. In all-stop mode, we make this decision based on whether
2626 *any* thread has a pending status. If there's a thread that needs
2627 the step-over-breakpoint dance, then don't resume any other thread
2628 but that particular one. */
2629 leave_pending = (lwp->status_pending_p || leave_all_stopped);
2630
2631 if (!leave_pending)
2632 {
2633 if (debug_threads)
2634 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
2635
2636 step = (lwp->resume->kind == resume_step);
2637 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
2638 get_lwp_thread (lwp)->last_status.kind = TARGET_WAITKIND_IGNORE;
2639 }
2640 else
2641 {
2642 if (debug_threads)
2643 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
2644
2645 /* If we have a new signal, enqueue the signal. */
2646 if (lwp->resume->sig != 0)
2647 {
2648 struct pending_signals *p_sig;
2649 p_sig = xmalloc (sizeof (*p_sig));
2650 p_sig->prev = lwp->pending_signals;
2651 p_sig->signal = lwp->resume->sig;
2652 memset (&p_sig->info, 0, sizeof (siginfo_t));
2653
2654 /* If this is the same signal we were previously stopped by,
2655 make sure to queue its siginfo. We can ignore the return
2656 value of ptrace; if it fails, we'll skip
2657 PTRACE_SETSIGINFO. */
2658 if (WIFSTOPPED (lwp->last_status)
2659 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
2660 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
2661
2662 lwp->pending_signals = p_sig;
2663 }
2664 }
2665
2666 lwp->resume = NULL;
2667 return 0;
2668 }
2669
2670 static void
2671 linux_resume (struct thread_resume *resume_info, size_t n)
2672 {
2673 struct thread_resume_array array = { resume_info, n };
2674 struct lwp_info *need_step_over = NULL;
2675 int any_pending;
2676 int leave_all_stopped;
2677
2678 find_inferior (&all_threads, linux_set_resume_request, &array);
2679
2680 /* If there is a thread which would otherwise be resumed, which has
2681 a pending status, then don't resume any threads - we can just
2682 report the pending status. Make sure to queue any signals that
2683 would otherwise be sent. In non-stop mode, we'll apply this
2684 logic to each thread individually. We consume all pending events
2685 before considering whether to start a step-over (in all-stop). */
2686 any_pending = 0;
2687 if (!non_stop)
2688 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
2689
2690 /* If there is a thread which would otherwise be resumed, which is
2691 stopped at a breakpoint that needs stepping over, then don't
2692 resume any threads - have it step over the breakpoint with all
2693 other threads stopped, then resume all threads again. Make sure
2694 to queue any signals that would otherwise be delivered or
2695 queued. */
2696 if (!any_pending && supports_breakpoints ())
2697 need_step_over
2698 = (struct lwp_info *) find_inferior (&all_lwps,
2699 need_step_over_p, NULL);
2700
2701 leave_all_stopped = (need_step_over != NULL || any_pending);
2702
2703 if (debug_threads)
2704 {
2705 if (need_step_over != NULL)
2706 fprintf (stderr, "Not resuming all, need step over\n");
2707 else if (any_pending)
2708 fprintf (stderr,
2709 "Not resuming, all-stop and found "
2710 "an LWP with pending status\n");
2711 else
2712 fprintf (stderr, "Resuming, no pending status or step over needed\n");
2713 }
2714
2715 /* Even if we're leaving threads stopped, queue all signals we'd
2716 otherwise deliver. */
2717 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
2718
2719 if (need_step_over)
2720 start_step_over (need_step_over);
2721 }
2722
2723 /* This function is called once per thread. We check the thread's
2724 last resume request, which will tell us whether to resume, step, or
2725 leave the thread stopped. Any signal the client requested to be
2726 delivered has already been enqueued at this point.
2727
2728 If any thread that GDB wants running is stopped at an internal
2729 breakpoint that needs stepping over, we start a step-over operation
2730 on that particular thread, and leave all others stopped. */
2731
2732 static void
2733 proceed_one_lwp (struct inferior_list_entry *entry)
2734 {
2735 struct lwp_info *lwp;
2736 int step;
2737
2738 lwp = (struct lwp_info *) entry;
2739
2740 if (debug_threads)
2741 fprintf (stderr,
2742 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
2743
2744 if (!lwp->stopped)
2745 {
2746 if (debug_threads)
2747 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
2748 return;
2749 }
2750
2751 if (lwp->last_resume_kind == resume_stop)
2752 {
2753 if (debug_threads)
2754 fprintf (stderr, " client wants LWP %ld stopped\n", lwpid_of (lwp));
2755 return;
2756 }
2757
2758 if (lwp->status_pending_p)
2759 {
2760 if (debug_threads)
2761 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
2762 lwpid_of (lwp));
2763 return;
2764 }
2765
2766 if (lwp->suspended)
2767 {
2768 if (debug_threads)
2769 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
2770 return;
2771 }
2772
2773 step = lwp->last_resume_kind == resume_step;
2774 linux_resume_one_lwp (lwp, step, 0, NULL);
2775 }
2776
2777 /* When we finish a step-over, set threads running again. If there's
2778 another thread that may need a step-over, now's the time to start
2779 it. Eventually, we'll move all threads past their breakpoints. */
2780
2781 static void
2782 proceed_all_lwps (void)
2783 {
2784 struct lwp_info *need_step_over;
2785
2786 /* If there is a thread which would otherwise be resumed, which is
2787 stopped at a breakpoint that needs stepping over, then don't
2788 resume any threads - have it step over the breakpoint with all
2789 other threads stopped, then resume all threads again. */
2790
2791 if (supports_breakpoints ())
2792 {
2793 need_step_over
2794 = (struct lwp_info *) find_inferior (&all_lwps,
2795 need_step_over_p, NULL);
2796
2797 if (need_step_over != NULL)
2798 {
2799 if (debug_threads)
2800 fprintf (stderr, "proceed_all_lwps: found "
2801 "thread %ld needing a step-over\n",
2802 lwpid_of (need_step_over));
2803
2804 start_step_over (need_step_over);
2805 return;
2806 }
2807 }
2808
2809 if (debug_threads)
2810 fprintf (stderr, "Proceeding, no step-over needed\n");
2811
2812 for_each_inferior (&all_lwps, proceed_one_lwp);
2813 }
2814
2815 /* Set stopped LWPs that the client wanted running, and that have
2816 no pending status, running again - all except EXCEPT, if it is
2817 non-NULL. This undoes a stop_all_lwps call. */
2818
2819 static void
2820 unstop_all_lwps (struct lwp_info *except)
2821 {
2822 if (debug_threads)
2823 {
2824 if (except)
2825 fprintf (stderr,
2826 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
2827 else
2828 fprintf (stderr,
2829 "unstopping all lwps\n");
2830 }
2831
2832 /* Make sure proceed_one_lwp doesn't try to resume this thread. */
2833 if (except != NULL)
2834 ++except->suspended;
2835
2836 for_each_inferior (&all_lwps, proceed_one_lwp);
2837
2838 if (except != NULL)
2839 --except->suspended;
2840 }
2841
2842 #ifdef HAVE_LINUX_USRREGS
2843
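/* Return the PTRACE_PEEKUSER/PTRACE_POKEUSER offset of register
   REGNUM, as given by the low target's register map. */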
2844 int
2845 register_addr (int regnum)
2846 {
2847 int addr;
2848
2849 if (regnum < 0 || regnum >= the_low_target.num_regs)
2850 error ("Invalid register number %d.", regnum);
2851
2852 addr = the_low_target.regmap[regnum];
2853
2854 return addr;
2855 }
2856
2857 /* Fetch one register. */
2858 static void
2859 fetch_register (struct regcache *regcache, int regno)
2860 {
2861 CORE_ADDR regaddr;
2862 int i, size;
2863 char *buf;
2864 int pid;
2865
2866 if (regno >= the_low_target.num_regs)
2867 return;
2868 if ((*the_low_target.cannot_fetch_register) (regno))
2869 return;
2870
2871 regaddr = register_addr (regno);
2872 if (regaddr == -1)
2873 return;
2874
2875 pid = lwpid_of (get_thread_lwp (current_inferior));
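/* Round the register size up to a whole number of ptrace transfer
   units. */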
2876 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2877 & - sizeof (PTRACE_XFER_TYPE));
2878 buf = alloca (size);
2879 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2880 {
2881 errno = 0;
2882 *(PTRACE_XFER_TYPE *) (buf + i) =
2883 ptrace (PTRACE_PEEKUSER, pid,
2884 /* Coerce to a uintptr_t first to avoid potential gcc warning
2885 of coercing an 8 byte integer to a 4 byte pointer. */
2886 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
2887 regaddr += sizeof (PTRACE_XFER_TYPE);
2888 if (errno != 0)
2889 error ("reading register %d: %s", regno, strerror (errno));
2890 }
2891
2892 if (the_low_target.supply_ptrace_register)
2893 the_low_target.supply_ptrace_register (regcache, regno, buf);
2894 else
2895 supply_register (regcache, regno, buf);
2896 }
2897
2898 /* Fetch all registers, or just one, from the child process. */
2899 static void
2900 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
2901 {
2902 if (regno == -1)
2903 for (regno = 0; regno < the_low_target.num_regs; regno++)
2904 fetch_register (regcache, regno);
2905 else
2906 fetch_register (regcache, regno);
2907 }
2908
2909 /* Store our register values back into the inferior.
2910 If REGNO is -1, do this for all registers.
2911 Otherwise, REGNO specifies which register (so we can save time). */
2912 static void
2913 usr_store_inferior_registers (struct regcache *regcache, int regno)
2914 {
2915 CORE_ADDR regaddr;
2916 int i, size;
2917 char *buf;
2918 int pid;
2919
2920 if (regno >= 0)
2921 {
2922 if (regno >= the_low_target.num_regs)
2923 return;
2924
2925 if ((*the_low_target.cannot_store_register) (regno) == 1)
2926 return;
2927
2928 regaddr = register_addr (regno);
2929 if (regaddr == -1)
2930 return;
2931 errno = 0;
2932 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2933 & - sizeof (PTRACE_XFER_TYPE);
2934 buf = alloca (size);
2935 memset (buf, 0, size);
2936
2937 if (the_low_target.collect_ptrace_register)
2938 the_low_target.collect_ptrace_register (regcache, regno, buf);
2939 else
2940 collect_register (regcache, regno, buf);
2941
2942 pid = lwpid_of (get_thread_lwp (current_inferior));
2943 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2944 {
2945 errno = 0;
2946 ptrace (PTRACE_POKEUSER, pid,
2947 /* Coerce to a uintptr_t first to avoid potential gcc warning
2948 about coercing an 8 byte integer to a 4 byte pointer. */
2949 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
2950 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
2951 if (errno != 0)
2952 {
2953 /* At this point, ESRCH should mean the process is
2954 already gone, in which case we simply ignore attempts
2955 to change its registers. See also the related
2956 comment in linux_resume_one_lwp. */
2957 if (errno == ESRCH)
2958 return;
2959
2960 if ((*the_low_target.cannot_store_register) (regno) == 0)
2961 error ("writing register %d: %s", regno, strerror (errno));
2962 }
2963 regaddr += sizeof (PTRACE_XFER_TYPE);
2964 }
2965 }
2966 else
2967 for (regno = 0; regno < the_low_target.num_regs; regno++)
2968 usr_store_inferior_registers (regcache, regno);
2969 }
2970 #endif /* HAVE_LINUX_USRREGS */
2971
2972
2973
2974 #ifdef HAVE_LINUX_REGSETS
2975
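/* Fetch into REGCACHE all registers covered by kernel regsets.
   Returns 0 if the general registers were among them, and 1
   otherwise, in which case the caller falls back to the
   PTRACE_PEEKUSER method (see linux_fetch_registers below). */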
2976 static int
2977 regsets_fetch_inferior_registers (struct regcache *regcache)
2978 {
2979 struct regset_info *regset;
2980 int saw_general_regs = 0;
2981 int pid;
2982
2983 regset = target_regsets;
2984
2985 pid = lwpid_of (get_thread_lwp (current_inferior));
2986 while (regset->size >= 0)
2987 {
2988 void *buf;
2989 int res;
2990
2991 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2992 {
2993 regset ++;
2994 continue;
2995 }
2996
2997 buf = xmalloc (regset->size);
2998 #ifndef __sparc__
2999 res = ptrace (regset->get_request, pid, 0, buf);
3000 #else
3001 res = ptrace (regset->get_request, pid, buf, 0);
3002 #endif
3003 if (res < 0)
3004 {
3005 if (errno == EIO)
3006 {
3007 /* If we get EIO on a regset, do not try it again for
3008 this process. */
3009 disabled_regsets[regset - target_regsets] = 1;
3010 free (buf);
3011 continue;
3012 }
3013 else
3014 {
3015 char s[256];
3016 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3017 pid);
3018 perror (s);
3019 }
3020 }
3021 else if (regset->type == GENERAL_REGS)
3022 saw_general_regs = 1;
3023 regset->store_function (regcache, buf);
3024 regset ++;
3025 free (buf);
3026 }
3027 if (saw_general_regs)
3028 return 0;
3029 else
3030 return 1;
3031 }
3032
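/* Write back to the inferior all registers covered by kernel
   regsets from REGCACHE. Returns 0 if the general registers were
   among them, and 1 otherwise (see regsets_fetch_inferior_registers
   above). */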
3033 static int
3034 regsets_store_inferior_registers (struct regcache *regcache)
3035 {
3036 struct regset_info *regset;
3037 int saw_general_regs = 0;
3038 int pid;
3039
3040 regset = target_regsets;
3041
3042 pid = lwpid_of (get_thread_lwp (current_inferior));
3043 while (regset->size >= 0)
3044 {
3045 void *buf;
3046 int res;
3047
3048 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3049 {
3050 regset ++;
3051 continue;
3052 }
3053
3054 buf = xmalloc (regset->size);
3055
3056 /* First fill the buffer with the current register set contents,
3057 in case there are any items in the kernel's regset that are
3058 not in gdbserver's regcache. */
3059 #ifndef __sparc__
3060 res = ptrace (regset->get_request, pid, 0, buf);
3061 #else
3062 res = ptrace (regset->get_request, pid, buf, 0);
3063 #endif
3064
3065 if (res == 0)
3066 {
3067 /* Then overlay our cached registers on that. */
3068 regset->fill_function (regcache, buf);
3069
3070 /* Only now do we write the register set. */
3071 #ifndef __sparc__
3072 res = ptrace (regset->set_request, pid, 0, buf);
3073 #else
3074 res = ptrace (regset->set_request, pid, buf, 0);
3075 #endif
3076 }
3077
3078 if (res < 0)
3079 {
3080 if (errno == EIO)
3081 {
3082 /* If we get EIO on a regset, do not try it again for
3083 this process. */
3084 disabled_regsets[regset - target_regsets] = 1;
3085 free (buf);
3086 continue;
3087 }
3088 else if (errno == ESRCH)
3089 {
3090 /* At this point, ESRCH should mean the process is
3091 already gone, in which case we simply ignore attempts
3092 to change its registers. See also the related
3093 comment in linux_resume_one_lwp. */
3094 free (buf);
3095 return 0;
3096 }
3097 else
3098 {
3099 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3100 }
3101 }
3102 else if (regset->type == GENERAL_REGS)
3103 saw_general_regs = 1;
3104 regset ++;
3105 free (buf);
3106 }
3107 if (saw_general_regs)
3108 return 0;
3109 else
3110 return 1;
3112 }
3113
3114 #endif /* HAVE_LINUX_REGSETS */
3115
3116
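/* Fetch registers using the regset method where available, falling
   back to the PTRACE_PEEKUSER method for anything the regsets did
   not cover. */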
3117 void
3118 linux_fetch_registers (struct regcache *regcache, int regno)
3119 {
3120 #ifdef HAVE_LINUX_REGSETS
3121 if (regsets_fetch_inferior_registers (regcache) == 0)
3122 return;
3123 #endif
3124 #ifdef HAVE_LINUX_USRREGS
3125 usr_fetch_inferior_registers (regcache, regno);
3126 #endif
3127 }
3128
3129 void
3130 linux_store_registers (struct regcache *regcache, int regno)
3131 {
3132 #ifdef HAVE_LINUX_REGSETS
3133 if (regsets_store_inferior_registers (regcache) == 0)
3134 return;
3135 #endif
3136 #ifdef HAVE_LINUX_USRREGS
3137 usr_store_inferior_registers (regcache, regno);
3138 #endif
3139 }
3140
3141
3142 /* Copy LEN bytes from inferior's memory starting at MEMADDR to
3143 debugger memory starting at MYADDR. Returns 0 on success, or the value of errno on failure. */
3144
3145 static int
3146 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
3147 {
3148 register int i;
3149 /* Round starting address down to longword boundary. */
3150 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3151 /* Round ending address up; get number of longwords that makes. */
3152 register int count
3153 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
3154 / sizeof (PTRACE_XFER_TYPE);
3155 /* Allocate buffer of that many longwords. */
3156 register PTRACE_XFER_TYPE *buffer
3157 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3158 int fd;
3159 char filename[64];
3160 int pid = lwpid_of (get_thread_lwp (current_inferior));
3161
3162 /* Try using /proc. Don't bother for transfers shorter than three words. */
3163 if (len >= 3 * sizeof (long))
3164 {
3165 /* We could keep this file open and cache it - possibly one per
3166 thread. That requires some juggling, but is even faster. */
3167 sprintf (filename, "/proc/%d/mem", pid);
3168 fd = open (filename, O_RDONLY | O_LARGEFILE);
3169 if (fd == -1)
3170 goto no_proc;
3171
3172 /* If pread64 is available, use it. It's faster if the kernel
3173 supports it (only one syscall), and it's 64-bit safe even on
3174 32-bit platforms (for instance, SPARC debugging a SPARC64
3175 application). */
3176 #ifdef HAVE_PREAD64
3177 if (pread64 (fd, myaddr, len, memaddr) != len)
3178 #else
3179 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
3180 #endif
3181 {
3182 close (fd);
3183 goto no_proc;
3184 }
3185
3186 close (fd);
3187 return 0;
3188 }
3189
3190 no_proc:
3191 /* Read all the longwords */
3192 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3193 {
3194 errno = 0;
3195 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3196 about coercing an 8 byte integer to a 4 byte pointer. */
3197 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
3198 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3199 if (errno)
3200 return errno;
3201 }
3202
3203 /* Copy appropriate bytes out of the buffer. */
3204 memcpy (myaddr,
3205 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
3206 len);
3207
3208 return 0;
3209 }
3210
3211 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3212 memory at MEMADDR. On failure (cannot write to the inferior)
3213 returns the value of errno. */
3214
3215 static int
3216 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
3217 {
3218 register int i;
3219 /* Round starting address down to longword boundary. */
3220 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3221 /* Round ending address up; get number of longwords that makes. */
3222 register int count
3223 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
3224 /* Allocate buffer of that many longwords. */
3225 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3226 int pid = lwpid_of (get_thread_lwp (current_inferior));
3227
3228 if (debug_threads)
3229 {
3230 /* Dump up to four bytes. */
3231 unsigned int val = * (unsigned int *) myaddr;
3232 if (len == 1)
3233 val = val & 0xff;
3234 else if (len == 2)
3235 val = val & 0xffff;
3236 else if (len == 3)
3237 val = val & 0xffffff;
3238 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
3239 val, (long)memaddr);
3240 }
3241
3242 /* Fill start and end extra bytes of buffer with existing memory data. */
3243
3244 errno = 0;
3245 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3246 about coercing an 8 byte integer to a 4 byte pointer. */
3247 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
3248 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3249 if (errno)
3250 return errno;
3251
3252 if (count > 1)
3253 {
3254 errno = 0;
3255 buffer[count - 1]
3256 = ptrace (PTRACE_PEEKTEXT, pid,
3257 /* Coerce to a uintptr_t first to avoid potential gcc warning
3258 about coercing an 8 byte integer to a 4 byte pointer. */
3259 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
3260 * sizeof (PTRACE_XFER_TYPE)),
3261 0);
3262 if (errno)
3263 return errno;
3264 }
3265
3266 /* Copy data to be written over corresponding part of buffer. */
3267
3268 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
3269
3270 /* Write the entire buffer. */
3271
3272 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3273 {
3274 errno = 0;
3275 ptrace (PTRACE_POKETEXT, pid,
3276 /* Coerce to a uintptr_t first to avoid potential gcc warning
3277 about coercing an 8 byte integer to a 4 byte pointer. */
3278 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
3279 (PTRACE_ARG4_TYPE) buffer[i]);
3280 if (errno)
3281 return errno;
3282 }
3283
3284 return 0;
3285 }
3286
3287 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
3288 static int linux_supports_tracefork_flag;
3289
3290 /* Helper functions for linux_test_for_tracefork, called via clone (). */
3291
3292 static int
3293 linux_tracefork_grandchild (void *arg)
3294 {
3295 _exit (0);
3296 }
3297
3298 #define STACK_SIZE 4096
3299
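/* The traced child: stop so the parent can set ptrace options, then
   fork (or clone, on MMU-less systems) a grandchild so that a fork
   event can be observed. */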
3300 static int
3301 linux_tracefork_child (void *arg)
3302 {
3303 ptrace (PTRACE_TRACEME, 0, 0, 0);
3304 kill (getpid (), SIGSTOP);
3305
3306 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3307
3308 if (fork () == 0)
3309 linux_tracefork_grandchild (NULL);
3310
3311 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3312
3313 #ifdef __ia64__
3314 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
3315 CLONE_VM | SIGCHLD, NULL);
3316 #else
3317 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
3318 CLONE_VM | SIGCHLD, NULL);
3319 #endif
3320
3321 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3322
3323 _exit (0);
3324 }
3325
3326 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
3327 sure that we can enable the option, and that it had the desired
3328 effect. */
3329
3330 static void
3331 linux_test_for_tracefork (void)
3332 {
3333 int child_pid, ret, status;
3334 long second_pid;
3335 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3336 char *stack = xmalloc (STACK_SIZE * 4);
3337 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3338
3339 linux_supports_tracefork_flag = 0;
3340
3341 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3342
3343 child_pid = fork ();
3344 if (child_pid == 0)
3345 linux_tracefork_child (NULL);
3346
3347 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3348
3349 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
3350 #ifdef __ia64__
3351 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
3352 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3353 #else /* !__ia64__ */
3354 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
3355 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3356 #endif /* !__ia64__ */
3357
3358 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3359
3360 if (child_pid == -1)
3361 perror_with_name ("clone");
3362
3363 ret = my_waitpid (child_pid, &status, 0);
3364 if (ret == -1)
3365 perror_with_name ("waitpid");
3366 else if (ret != child_pid)
3367 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
3368 if (! WIFSTOPPED (status))
3369 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
3370
3371 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
3372 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
3373 if (ret != 0)
3374 {
3375 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3376 if (ret != 0)
3377 {
3378 warning ("linux_test_for_tracefork: failed to kill child");
3379 return;
3380 }
3381
3382 ret = my_waitpid (child_pid, &status, 0);
3383 if (ret != child_pid)
3384 warning ("linux_test_for_tracefork: failed to wait for killed child");
3385 else if (!WIFSIGNALED (status))
3386 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
3387 "killed child", status);
3388
3389 return;
3390 }
3391
3392 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
3393 if (ret != 0)
3394 warning ("linux_test_for_tracefork: failed to resume child");
3395
3396 ret = my_waitpid (child_pid, &status, 0);
3397
3398 if (ret == child_pid && WIFSTOPPED (status)
3399 && status >> 16 == PTRACE_EVENT_FORK)
3400 {
3401 second_pid = 0;
3402 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
3403 if (ret == 0 && second_pid != 0)
3404 {
3405 int second_status;
3406
3407 linux_supports_tracefork_flag = 1;
3408 my_waitpid (second_pid, &second_status, 0);
3409 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
3410 if (ret != 0)
3411 warning ("linux_test_for_tracefork: failed to kill second child");
3412 my_waitpid (second_pid, &status, 0);
3413 }
3414 }
3415 else
3416 warning ("linux_test_for_tracefork: unexpected result from waitpid "
3417 "(%d, status 0x%x)", ret, status);
3418
3419 do
3420 {
3421 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3422 if (ret != 0)
3423 warning ("linux_test_for_tracefork: failed to kill child");
3424 my_waitpid (child_pid, &status, 0);
3425 }
3426 while (WIFSTOPPED (status));
3427
3428 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3429 free (stack);
3430 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3431 }
3432
3433
3434 static void
3435 linux_look_up_symbols (void)
3436 {
3437 #ifdef USE_THREAD_DB
3438 struct process_info *proc = current_process ();
3439
3440 if (proc->private->thread_db != NULL)
3441 return;
3442
3443 /* If the kernel supports tracing forks then it also supports tracing
3444 clones, and then we don't need to use the magic thread event breakpoint
3445 to learn about threads. */
3446 thread_db_init (!linux_supports_tracefork_flag);
3447 #endif
3448 }
3449
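/* Send a SIGINT to interrupt the inferior: to the current
   inferior's LWP if GDB selected a continue-thread, otherwise to
   the main process (signal_pid). */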
3450 static void
3451 linux_request_interrupt (void)
3452 {
3453 extern unsigned long signal_pid;
3454
3455 if (!ptid_equal (cont_thread, null_ptid)
3456 && !ptid_equal (cont_thread, minus_one_ptid))
3457 {
3458 struct lwp_info *lwp;
3459 int lwpid;
3460
3461 lwp = get_thread_lwp (current_inferior);
3462 lwpid = lwpid_of (lwp);
3463 kill_lwp (lwpid, SIGINT);
3464 }
3465 else
3466 kill_lwp (signal_pid, SIGINT);
3467 }
3468
3469 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
3470 to debugger memory starting at MYADDR. */
3471
3472 static int
3473 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
3474 {
3475 char filename[PATH_MAX];
3476 int fd, n;
3477 int pid = lwpid_of (get_thread_lwp (current_inferior));
3478
3479 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
3480
3481 fd = open (filename, O_RDONLY);
3482 if (fd < 0)
3483 return -1;
3484
3485 if (offset != (CORE_ADDR) 0
3486 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3487 n = -1;
3488 else
3489 n = read (fd, myaddr, len);
3490
3491 close (fd);
3492
3493 return n;
3494 }
3495
3496 /* These breakpoint and watchpoint related wrapper functions simply
3497 pass on the function call if the target has registered a
3498 corresponding function. */
3499
3500 static int
3501 linux_insert_point (char type, CORE_ADDR addr, int len)
3502 {
3503 if (the_low_target.insert_point != NULL)
3504 return the_low_target.insert_point (type, addr, len);
3505 else
3506 /* Unsupported (see target.h). */
3507 return 1;
3508 }
3509
3510 static int
3511 linux_remove_point (char type, CORE_ADDR addr, int len)
3512 {
3513 if (the_low_target.remove_point != NULL)
3514 return the_low_target.remove_point (type, addr, len);
3515 else
3516 /* Unsupported (see target.h). */
3517 return 1;
3518 }
3519
3520 static int
3521 linux_stopped_by_watchpoint (void)
3522 {
3523 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3524
3525 return lwp->stopped_by_watchpoint;
3526 }
3527
3528 static CORE_ADDR
3529 linux_stopped_data_address (void)
3530 {
3531 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3532
3533 return lwp->stopped_data_address;
3534 }
3535
3536 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3537 #if defined(__mcoldfire__)
3538 /* These should really be defined in the kernel's ptrace.h header. */
3539 #define PT_TEXT_ADDR 49*4
3540 #define PT_DATA_ADDR 50*4
3541 #define PT_TEXT_END_ADDR 51*4
3542 #endif
3543
3544 /* Under uClinux, programs are loaded at non-zero offsets, which we need
3545 to tell gdb about. */
3546
3547 static int
3548 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
3549 {
3550 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
3551 unsigned long text, text_end, data;
3552 int pid = lwpid_of (get_thread_lwp (current_inferior));
3553
3554 errno = 0;
3555
3556 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
3557 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
3558 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
3559
3560 if (errno == 0)
3561 {
3562 /* Both text and data offsets produced at compile-time (and so
3563 used by gdb) are relative to the beginning of the program,
3564 with the data segment immediately following the text segment.
3565 However, the actual runtime layout in memory may put the data
3566 somewhere else, so when we send gdb a data base-address, we
3567 use the real data base address and subtract the compile-time
3568 data base-address from it (which is just the length of the
3569 text segment). BSS immediately follows data in both
3570 cases. */
3571 *text_p = text;
3572 *data_p = data - (text_end - text);
3573
3574 return 1;
3575 }
3576 #endif
3577 return 0;
3578 }
3579 #endif
3580
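/* qsort comparison function for ints, used for sorting the list of
   core numbers. */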
3581 static int
3582 compare_ints (const void *xa, const void *xb)
3583 {
3584 int a = *(const int *)xa;
3585 int b = *(const int *)xb;
3586
3587 return a - b;
3588 }
3589
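/* Remove adjacent duplicates from the sorted range [B, E). Returns
   one past the last unique element. */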
3590 static int *
3591 unique (int *b, int *e)
3592 {
3593 int *d = b;
3594 while (++b != e)
3595 if (*d != *b)
3596 *++d = *b;
3597 return ++d;
3598 }
3599
3600 /* Given PID, iterates over all threads in that process.
3601
3602 Information about each thread, in a format suitable for qXfer:osdata:thread
3603 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
3604 initialized, and the caller is responsible for finishing and appending '\0'
3605 to it.
3606
3607 The list of cores that threads are running on is assigned to *CORES, if it
3608 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
3609 should free *CORES. */
3610
3611 static void
3612 list_threads (int pid, struct buffer *buffer, char **cores)
3613 {
3614 int count = 0;
3615 int allocated = 10;
3616 int *core_numbers = xmalloc (sizeof (int) * allocated);
3617 char pathname[128];
3618 DIR *dir;
3619 struct dirent *dp;
3620 struct stat statbuf;
3621
3622 sprintf (pathname, "/proc/%d/task", pid);
3623 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
3624 {
3625 dir = opendir (pathname);
3626 if (!dir)
3627 {
3628 free (core_numbers);
3629 return;
3630 }
3631
3632 while ((dp = readdir (dir)) != NULL)
3633 {
3634 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
3635
3636 if (lwp != 0)
3637 {
3638 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
3639
3640 if (core != -1)
3641 {
3642 char s[sizeof ("4294967295")];
3643 sprintf (s, "%u", core);
3644
3645 if (count == allocated)
3646 {
3647 allocated *= 2;
3648 core_numbers = realloc (core_numbers,
3649 sizeof (int) * allocated);
3650 }
3651 core_numbers[count++] = core;
3652 if (buffer)
3653 buffer_xml_printf (buffer,
3654 "<item>"
3655 "<column name=\"pid\">%d</column>"
3656 "<column name=\"tid\">%s</column>"
3657 "<column name=\"core\">%s</column>"
3658 "</item>", pid, dp->d_name, s);
3659 }
3660 else
3661 {
3662 if (buffer)
3663 buffer_xml_printf (buffer,
3664 "<item>"
3665 "<column name=\"pid\">%d</column>"
3666 "<column name=\"tid\">%s</column>"
3667 "</item>", pid, dp->d_name);
3668 }
3669 }
3670 }
3671 }
3672
3673 if (cores)
3674 {
3675 *cores = NULL;
3676 if (count > 0)
3677 {
3678 struct buffer buffer2;
3679 int *b;
3680 int *e;
3681 qsort (core_numbers, count, sizeof (int), compare_ints);
3682
3683 /* Remove duplicates. */
3684 b = core_numbers;
3685 e = unique (b, core_numbers + count);
3686
3687 buffer_init (&buffer2);
3688
3689 for (b = core_numbers; b != e; ++b)
3690 {
3691 char number[sizeof ("4294967295")];
3692 sprintf (number, "%u", *b);
3693 buffer_xml_printf (&buffer2, "%s%s",
3694 (b == core_numbers) ? "" : ",", number);
3695 }
3696 buffer_grow_str0 (&buffer2, "");
3697
3698 *cores = buffer_finish (&buffer2);
3699 }
3700 }
3701 free (core_numbers);
3702 }
3703
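/* Append to BUFFER an <item> XML element describing process PID:
   its user, its command line, and the cores its threads are running
   on. */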
3704 static void
3705 show_process (int pid, const char *username, struct buffer *buffer)
3706 {
3707 char pathname[128];
3708 FILE *f;
3709 char cmd[MAXPATHLEN + 1];
3710
3711 sprintf (pathname, "/proc/%d/cmdline", pid);
3712
3713 if ((f = fopen (pathname, "r")) != NULL)
3714 {
3715 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
3716 if (len > 0)
3717 {
3718 char *cores = 0;
3719 int i;
3720 for (i = 0; i < len; i++)
3721 if (cmd[i] == '\0')
3722 cmd[i] = ' ';
3723 cmd[len] = '\0';
3724
3725 buffer_xml_printf (buffer,
3726 "<item>"
3727 "<column name=\"pid\">%d</column>"
3728 "<column name=\"user\">%s</column>"
3729 "<column name=\"command\">%s</column>",
3730 pid,
3731 username,
3732 cmd);
3733
3734 /* This only collects core numbers, and does not print threads. */
3735 list_threads (pid, NULL, &cores);
3736
3737 if (cores)
3738 {
3739 buffer_xml_printf (buffer,
3740 "<column name=\"cores\">%s</column>", cores);
3741 free (cores);
3742 }
3743
3744 buffer_xml_printf (buffer, "</item>");
3745 }
3746 fclose (f);
3747 }
3748 }

static int
linux_qxfer_osdata (const char *annex,
                    unsigned char *readbuf, unsigned const char *writebuf,
                    CORE_ADDR offset, int len)
{
  /* We take a snapshot of the process list the first time this object
     is read, i.e., when OFFSET is zero, and serve subsequent reads
     out of that snapshot.  */
  static const char *buf;
  static long len_avail = -1;
  static struct buffer buffer;
  int processes = 0;
  int threads = 0;

  DIR *dirp;

  if (strcmp (annex, "processes") == 0)
    processes = 1;
  else if (strcmp (annex, "threads") == 0)
    threads = 1;
  else
    return 0;

  if (!readbuf || writebuf)
    return 0;

  if (offset == 0)
    {
      if (len_avail != -1 && len_avail != 0)
        buffer_free (&buffer);
      len_avail = 0;
      buf = NULL;
      buffer_init (&buffer);
      if (processes)
        buffer_grow_str (&buffer, "<osdata type=\"processes\">");
      else if (threads)
        buffer_grow_str (&buffer, "<osdata type=\"threads\">");

      dirp = opendir ("/proc");
      if (dirp)
        {
          struct dirent *dp;
          while ((dp = readdir (dirp)) != NULL)
            {
              struct stat statbuf;
              char procentry[sizeof ("/proc/4294967295")];

              if (!isdigit (dp->d_name[0])
                  || strlen (dp->d_name) > sizeof ("4294967295") - 1)
                continue;

              sprintf (procentry, "/proc/%s", dp->d_name);
              if (stat (procentry, &statbuf) == 0
                  && S_ISDIR (statbuf.st_mode))
                {
                  int pid = (int) strtoul (dp->d_name, NULL, 10);

                  if (processes)
                    {
                      struct passwd *entry = getpwuid (statbuf.st_uid);
                      show_process (pid, entry ? entry->pw_name : "?", &buffer);
                    }
                  else if (threads)
                    {
                      list_threads (pid, &buffer, NULL);
                    }
                }
            }

          closedir (dirp);
        }
      buffer_grow_str0 (&buffer, "</osdata>\n");
      buf = buffer_finish (&buffer);
      len_avail = strlen (buf);
    }

  if (offset >= len_avail)
    {
      /* Done.  Get rid of the data.  */
      buffer_free (&buffer);
      buf = NULL;
      len_avail = 0;
      return 0;
    }

  if (len > len_avail - offset)
    len = len_avail - offset;
  memcpy (readbuf, buf + offset, len);

  return len;
}
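
/* GDB transfers an osdata object in a sequence of partial reads, so a
   typical exchange with the function above looks like this (chunk
   size illustrative):

     linux_qxfer_osdata ("processes", buf, NULL, 0, 4096)
       -> builds the snapshot, returns up to 4096 bytes of it;
     linux_qxfer_osdata ("processes", buf, NULL, 4096, 4096)
       -> returns the next slice out of the same snapshot;
     ...
     linux_qxfer_osdata ("processes", buf, NULL, len_avail, 4096)
       -> returns 0 and frees the snapshot.  */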

/* Convert a native/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  DIRECTION 0 converts from
   the native layout to the inferior layout; DIRECTION 1 converts the
   other way.  */

static void
siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
{
  int done = 0;

  if (the_low_target.siginfo_fixup != NULL)
    done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);

  /* If there was no callback, or the callback didn't do anything,
     then just do a straight memcpy.  */
  if (!done)
    {
      if (direction == 1)
        memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
      else
        memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
    }
}

static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
                    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  struct siginfo siginfo;
  char inf_siginfo[sizeof (struct siginfo)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
             readbuf != NULL ? "Reading" : "Writing",
             pid);

  if (offset > sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
        return -1;
    }

  return len;
}
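
/* Note that a write round-trips through the layout conversion: the
   function above first reads the current siginfo via
   PTRACE_GETSIGINFO, converts it to the inferior layout, patches only
   the [OFFSET, OFFSET + LEN) bytes supplied by GDB, converts back,
   and flushes the whole object with PTRACE_SETSIGINFO.  A partial
   write (e.g. GDB assigning to a single field of $_siginfo) therefore
   leaves the remaining fields intact.  */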

/* SIGCHLD handler.  It serves two purposes: in non-stop/async mode it
   lets us notice when children change state, and it acts as the
   handler for the sigsuspend in my_waitpid.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    /* fprintf is not async-signal-safe, so call write directly.  */
    write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);

  if (target_is_async_p ())
    async_file_mark (); /* Trigger a linux_wait.  */

  errno = old_errno;
}

static int
linux_supports_non_stop (void)
{
  return 1;
}

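/* linux_event_pipe below implements the usual self-pipe trick: the
   async-signal-safe SIGCHLD handler marks the pipe readable (via
   async_file_mark), which wakes the event loop's wait on the read
   end and causes the registered handle_target_event callback to run
   in a normal, non-signal context.  */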
static int
linux_async (int enable)
{
  int previous = (linux_event_pipe[0] != -1);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
        {
          if (pipe (linux_event_pipe) == -1)
            fatal ("creating event pipe failed.");

          fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
          fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

          /* Register the event loop handler.  */
          add_file_handler (linux_event_pipe[0],
                            handle_target_event, NULL);

          /* Always trigger a linux_wait.  */
          async_file_mark ();
        }
      else
        {
          delete_file_handler (linux_event_pipe[0]);

          close (linux_event_pipe[0]);
          close (linux_event_pipe[1]);
          linux_event_pipe[0] = -1;
          linux_event_pipe[1] = -1;
        }

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}

static int
linux_supports_multi_process (void)
{
  return 1;
}


/* Enumerate spufs IDs for process PID.  */
static int
spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
{
  int pos = 0;
  int written = 0;
  char path[128];
  DIR *dir;
  struct dirent *entry;

  sprintf (path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      fd = atoi (entry->d_name);
      if (!fd)
        continue;

      sprintf (path, "/proc/%ld/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
        continue;
      if (!S_ISDIR (st.st_mode))
        continue;

      if (statfs (path, &stfs) != 0)
        continue;
      if (stfs.f_type != SPUFS_MAGIC)
        continue;

      if (pos >= offset && pos + 4 <= offset + len)
        {
          *(unsigned int *)(buf + pos - offset) = fd;
          written += 4;
        }
      pos += 4;
    }

  closedir (dir);
  return written;
}
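
/* Worked example for the offset/len windowing above (fd numbers
   illustrative): with SPU context fds 5, 7 and 9, the full object is
   the 12-byte array {5, 7, 9} of 4-byte IDs.  A request with
   OFFSET == 4 and LEN == 8 stores 7 and 9 into BUF and returns 8,
   assuming readdir happens to return the entries in that order.  */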

/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
   object type, using the /proc file system.  */
static int
linux_qxfer_spu (const char *annex, unsigned char *readbuf,
                 unsigned const char *writebuf,
                 CORE_ADDR offset, int len)
{
  long pid = lwpid_of (get_thread_lwp (current_inferior));
  char buf[128];
  int fd = 0;
  int ret = 0;

  if (!writebuf && !readbuf)
    return -1;

  if (!*annex)
    {
      if (!readbuf)
        return -1;
      else
        return spu_enumerate_spu_ids (pid, readbuf, offset, len);
    }

  sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
  fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
  if (fd <= 0)
    return -1;

  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return 0;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else
    ret = read (fd, readbuf, (size_t) len);

  close (fd);
  return ret;
}

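/* Return the core last run on by the thread PTID, by parsing the
   "processor" field (field 39) of /proc/PID/task/LWP/stat.  A stat
   line looks roughly like (values illustrative):

     1234 (foo bar) S 1 1230 ... 2

   where the command name in parentheses may itself contain spaces,
   which is why the code below skips past the closing ')' before
   splitting the remaining fields on whitespace.  Returns -1 on
   failure.  */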
static int
linux_core_of_thread (ptid_t ptid)
{
  char filename[sizeof ("/proc//task//stat")
                + 2 * 20 /* Decimal digits for two 64-bit numbers.  */
                + 1];
  FILE *f;
  char *content = NULL;
  char *p;
  char *ts = 0;
  int content_read = 0;
  int i;
  int core;

  sprintf (filename, "/proc/%d/task/%ld/stat",
           ptid_get_pid (ptid), ptid_get_lwp (ptid));
  f = fopen (filename, "r");
  if (!f)
    return -1;

  for (;;)
    {
      int n;
      content = realloc (content, content_read + 1024);
      n = fread (content + content_read, 1, 1024, f);
      content_read += n;
      if (n < 1024)
        {
          content[content_read] = '\0';
          break;
        }
    }

  p = strchr (content, '(');
  p = strchr (p, ')') + 2;  /* Skip ')' and the space after it.  */

  /* The "processor" field is the 39th; P currently points at the
     third, so skip 36 more.  */
  p = strtok_r (p, " ", &ts);
  for (i = 0; i != 36; ++i)
    p = strtok_r (NULL, " ", &ts);

  if (p == NULL || sscanf (p, "%d", &core) != 1)
    core = -1;

  free (content);
  fclose (f);

  return core;
}

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_core_of_thread
};

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
#endif
}

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
                       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}