gdb/gdbserver/linux-low.c
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifndef PTRACE_GETSIGINFO
# define PTRACE_GETSIGINFO 0x4202
# define PTRACE_SETSIGINFO 0x4203
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the normal
   values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
#define PTRACE_O_TRACEVFORK 0x00000004
#define PTRACE_O_TRACECLONE 0x00000008
#define PTRACE_O_TRACEEXEC 0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT 0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6

#endif /* PTRACE_EVENT_FORK */

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   in some contexts.  */
#ifndef __WALL
#define __WALL 0x40000000 /* Wait for any child.  */
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
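
/* For example, on x86 GNU/Linux SIGSTOP is 19 (0x13), so
   W_STOPCODE (SIGSTOP) evaluates to 0x137f: a synthetic wait status
   for which WIFSTOPPED is true and WSTOPSIG recovers SIGSTOP.  This
   lets code below fabricate a stop status without calling waitpid.  */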

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some other
   process will presumably claim each of these as forked children
   momentarily.  */

struct inferior_list stopped_pids;

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (void);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int linux_core_of_thread (ptid_t ptid);
static void proceed_all_lwps (void);
static void unstop_all_lwps (struct lwp_info *except);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct inferior_list_entry *entry);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Accepts an integer PID; returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd; the caller must free it.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
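  /* readlink does not NUL-terminate the string it writes, so clear
     the buffer first to guarantee the result below is terminated.  */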
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);
  if (readlink (name1, name2, MAXPATHLEN) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
	  && header->e_ident[EI_MAG1] == ELFMAG1
	  && header->e_ident[EI_MAG2] == ELFMAG2
	  && header->e_ident[EI_MAG3] == ELFMAG3
	  && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}
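
/* Low targets that support both 32- and 64-bit inferiors can, for
   example, call elf_64_file_p on "/proc/PID/exe" to decide which
   register layout a just-attached inferior uses; 0 here means a
   readable but non-ELF64 image, while -1 means the file could not
   be read at all.  */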

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Remove a process from the common process list,
   also freeing all private data.  */

static void
linux_remove_process (struct process_info *process)
{
  struct process_info_private *priv = process->private;

  free (priv->arch_private);
  free (priv);
  remove_process (process);
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
	 LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
	{
	  /* Since all signals are blocked, there's no need to check
	     for EINTR here.  */
	  ret = waitpid (pid, status, flags);
	  out_errno = errno;

	  if (ret == -1 && out_errno != ECHILD)
	    break;
	  else if (ret > 0)
	    break;

	  if (flags & __WCLONE)
	    {
	      /* We've tried both flavors now.  If WNOHANG is set,
		 there's nothing else to do, just bail out.  */
	      if (wnohang)
		break;

	      if (debug_threads)
		fprintf (stderr, "blocking\n");

	      /* Block waiting for signals.  */
	      sigsuspend (&wake_mask);
	    }

	  flags ^= __WCLONE;
	}

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
	ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
	     pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}
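
/* The emulation above works by polling: each WNOHANG waitpid call
   checks one flavor of child (plain children vs. __WCLONE children),
   the flags ^= __WCLONE at the bottom switches to the other flavor,
   and only after both flavors report nothing does the loop sleep in
   sigsuspend until some signal (e.g. SIGCHLD) arrives and the cycle
   repeats.  */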

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      ptrace (PTRACE_SETOPTIONS, new_pid, 0,
	      (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
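
/* For reference, the kernel encodes an extended event in the waitpid
   status by placing the event code in the high bits: for a clone
   event the status is (PTRACE_EVENT_CLONE << 16) | (SIGTRAP << 8) | 0x7f,
   i.e. 0x0003057f, so WIFSTOPPED and WSTOPSIG still see a SIGTRAP stop
   while the wstat >> 16 above recovers the event code.  */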

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP && !lwp->stepping)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
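
/* Concretely, on the i386 the breakpoint instruction is the one-byte
   int3 (0xcc) and decr_pc_after_break is 1: with a breakpoint set at
   0x8048000, the trap leaves $eip at 0x8048001, and the adjustment
   above maps the stop back to the breakpoint address 0x8048000.  */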

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  lwp->last_resume_kind = resume_continue;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
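      /* Make this child traced by its parent: once the execv below
	 succeeds, the kernel stops the new image with a SIGTRAP
	 before it runs its first instruction, which is where the
	 debugger picks it up.  */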
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}
      else
	/* If we fail to attach to a process, report an error.  */
	error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
	       strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might not have been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_lwp sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.
	??? If the process already has several threads we leave the other
	threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

int
linux_attach (unsigned long pid)
{
  struct lwp_info *lwp;

  linux_attach_lwp_1 (pid, 1);

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      lwp = (struct lwp_info *) find_inferior_id (&all_lwps,
						  ptid_build (pid, pid, 0));
      lwp->last_resume_kind = resume_stop;
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid , 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}
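
/* find_inferior returns the first entry for which the callback
   returns non-zero, so last_thread_of_process_p is true exactly when
   second_thread_of_pid_p never sees a second thread with the given
   pid, i.e. the walk comes back NULL.  */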

/* Kill the inferior lwp.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
	fprintf (stderr, "lkop: is last of process %s\n",
		 target_pid_to_str (entry->id));
      return 0;
    }

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  struct thread_info *thread;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));
  thread = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
	     lwpid_of (lwp), pid);

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

#ifdef USE_THREAD_DB
  thread_db_free (process, 0);
#endif
  delete_lwp (lwp);
  linux_remove_process (process);
  return 0;
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If we're detaching from a running inferior, make sure it is
     stopped first, as PTRACE_DETACH will not work otherwise.  */
  if (!lwp->stopped)
    {
      int lwpid = lwpid_of (lwp);

      stopping_threads = 1;
      send_sigstop (&lwp->head);

      /* If this detects a new thread through a clone event, the new
	 thread is appended to the end of the lwp list, so we'll
	 eventually detach from it.  */
      wait_for_sigstop (&lwp->head);
      stopping_threads = 0;

      /* If LWP exits while we're trying to stop it, there's nothing
	 left to do.  */
      lwp = find_lwp_pid (pid_to_ptid (lwpid));
      if (lwp == NULL)
	return 0;
    }

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      if (lwp->stopped)
	linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

static int
any_thread_of (struct inferior_list_entry *entry, void *args)
{
  int *pid_p = args;

  if (ptid_get_pid (entry->id) == *pid_p)
    return 1;

  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

#ifdef USE_THREAD_DB
  thread_db_free (process, 1);
#endif

  current_inferior =
    (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);

  delete_all_breakpoints ();
  find_inferior (&all_threads, linux_detach_one_lwp, &pid);
  linux_remove_process (process);
  return 0;
}

static void
linux_join (int pid)
{
  int status, ret;
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* Only inspect STATUS if waitpid actually reaped something;
       otherwise it is stale or uninitialized.  */
    if (ret > 0 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (lwp->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_STOPPED)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info *) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

 retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  /* Signals 32 and 33 are used internally by the LinuxThreads
     library; don't clutter the debug log with them.  */
  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  if (lwp->stepping)
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: [%s] is stepping\n",
		 target_pid_to_str (lwp->head.id));
      return 0;
    }

  regcache = get_thread_regcache (get_lwp_thread (lwp), 1);

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: Push back breakpoint for %s\n",
		 target_pid_to_str (lwp->head.id));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
	(*the_low_target.set_pc) (regcache, lwp->stop_pc);

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: No breakpoint found at %s for [%s]\n",
		 paddress (lwp->stop_pc),
		 target_pid_to_str (lwp->head.id));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      event_child = (struct lwp_info *)
	find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
	fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (requested_child->status_pending_p)
	event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
	fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
		 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
	{
	  if (debug_threads)
	    fprintf (stderr, "WNOHANG set, no event found\n");
	  return 0;
	}

      if (event_child == NULL)
	error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

	  /* If the last thread is exiting, just return.  */
	  if (last_thread_of_process_p (current_inferior))
	    {
	      if (debug_threads)
		fprintf (stderr, "LWP %ld is last lwp of process\n",
			 lwpid_of (event_child));
	      return lwpid_of (event_child);
	    }

	  if (!non_stop)
	    {
	      current_inferior = (struct thread_info *) all_threads.head;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now %ld\n",
			 lwpid_of (get_thread_lwp (current_inferior)));
	    }
	  else
	    {
	      current_inferior = NULL;
	      if (debug_threads)
		fprintf (stderr, "Current inferior is now <NULL>\n");
	    }

	  /* If we were waiting for this particular child to do something...
	     well, it did something.  */
	  if (requested_child != NULL)
	    {
	      int lwpid = lwpid_of (event_child);

	      /* Cancel the step-over operation --- the thread that
		 started it is gone.  */
	      if (finish_step_over (event_child))
		unstop_all_lwps (event_child);
	      delete_lwp (event_child);
	      return lwpid;
	    }

	  delete_lwp (event_child);

	  /* Wait for a more interesting event.  */
	  continue;
	}

      if (event_child->must_set_ptrace_flags)
	{
	  ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
		  0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
	  event_child->must_set_ptrace_flags = 0;
	}

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
	  && *wstat >> 16 != 0)
	{
	  handle_extended_wait (event_child, *wstat);
	  continue;
	}

      /* If GDB is not interested in this signal, don't stop other
	 threads, and don't report it to GDB.  Just resume the
	 inferior right away.  We do this for threading-related
	 signals as well as any that GDB specifically requested we
	 ignore.  But never ignore SIGSTOP if we sent it ourselves,
	 and do not ignore signals when stepping - they may require
	 special handling to skip the signal handler.  */
      /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
	 thread library?  */
      if (WIFSTOPPED (*wstat)
	  && !event_child->stepping
	  && (
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
	      (current_process ()->private->thread_db != NULL
	       && (WSTOPSIG (*wstat) == __SIGRTMIN
		   || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
	      ||
#endif
	      (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
	       && !(WSTOPSIG (*wstat) == SIGSTOP
		    && event_child->stop_expected))))
	{
	  siginfo_t info, *info_p;

	  if (debug_threads)
	    fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
		     WSTOPSIG (*wstat), lwpid_of (event_child));

	  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
	    info_p = &info;
	  else
	    info_p = NULL;
	  linux_resume_one_lwp (event_child, event_child->stepping,
				WSTOPSIG (*wstat), info_p);
	  continue;
	}

      if (WIFSTOPPED (*wstat)
	  && WSTOPSIG (*wstat) == SIGSTOP
	  && event_child->stop_expected)
	{
	  int should_stop;

	  if (debug_threads)
	    fprintf (stderr, "Expected stop.\n");
	  event_child->stop_expected = 0;

	  should_stop = (event_child->last_resume_kind == resume_stop
			 || stopping_threads);

	  if (!should_stop)
	    {
	      linux_resume_one_lwp (event_child,
				    event_child->stepping, 0, NULL);
	      continue;
	    }
	}

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  ptid_t wait_ptid;

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
	 with waitpid, so instead, we wait for any child, and leave
	 children we're not interested in right now with a pending
	 status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  while (1)
    {
      int event_pid;

      event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);

      if (event_pid > 0
	  && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
	{
	  struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));

	  if (! WIFSTOPPED (*wstat))
	    mark_lwp_dead (event_child, *wstat);
	  else
	    {
	      event_child->status_pending_p = 1;
	      event_child->status_pending = *wstat;
	    }
	}
      else
	return event_pid;
    }
}


/* Count the LWPs that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;

  if (get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->last_resume_kind == resume_step
      && lp->status_pending_p)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event that should be
   reported to GDB.  */

static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (lp->last_resume_kind != resume_stop
      && get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (lp->last_resume_kind != resume_stop
      && get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status_pending_p = 0;

  return 0;
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp
    = (struct lwp_info *) find_inferior (&all_lwps,
					 select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_threads)
	fprintf (stderr,
		 "SEL: Select single-step %s\n",
		 target_pid_to_str (ptid_of (event_lp)));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      find_inferior (&all_lwps, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
	fprintf (stderr,
		 "SEL: Found %d SIGTRAP events, selecting #%d\n",
		 num_events, random_selector);

      event_lp = (struct lwp_info *) find_inferior (&all_lwps,
						    select_event_lwp_callback,
						    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
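
/* Since rand () / (RAND_MAX + 1.0) lies in [0, 1), random_selector
   above is uniform over 0 .. num_events - 1, so every pending SIGTRAP
   has an equal chance of being reported; this is what prevents one
   busy thread from starving the others.  */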

/* Set this inferior LWP's state as "want-stopped".  We won't resume
   this LWP until the client gives us another action for it.  */

static void
gdb_wants_lwp_stopped (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Most threads are stopped implicitly (all-stop); tag that with
     signal 0.  The thread being explicitly reported stopped to the
     client gets its status fixed up afterwards.  */
  thread->last_status.kind = TARGET_WAITKIND_STOPPED;
  thread->last_status.value.sig = TARGET_SIGNAL_0;

  lwp->last_resume_kind = resume_stop;
}

/* Set all LWPs' states as "want-stopped".  */

static void
gdb_wants_all_stopped (void)
{
  for_each_inferior (&all_lwps, gdb_wants_lwp_stopped);
}

/* Wait for process, returns status.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
	      struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct thread_info *thread = NULL;
  struct lwp_info *event_child = NULL;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

 retry:
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      thread = (struct thread_info *) find_inferior_id (&all_threads,
							cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
	{
	  struct thread_resume resume_info;
	  resume_info.thread = minus_one_ptid;
	  resume_info.kind = resume_continue;
	  resume_info.sig = 0;
	  linux_resume (&resume_info, 1);
	}
      else
	ptid = cont_thread;
    }

  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
	fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
		 target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  event_child = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
	{
	  int pid = pid_of (event_child);
	  struct process_info *process = find_process_pid (pid);

#ifdef USE_THREAD_DB
	  thread_db_free (process, 0);
#endif
	  delete_lwp (event_child);
	  linux_remove_process (process);

	  current_inferior = NULL;

	  if (WIFEXITED (w))
	    {
	      ourstatus->kind = TARGET_WAITKIND_EXITED;
	      ourstatus->value.integer = WEXITSTATUS (w);

	      if (debug_threads)
		fprintf (stderr, "\nChild exited with retcode = %x \n",
			 WEXITSTATUS (w));
	    }
	  else
	    {
	      ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	      ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

	      if (debug_threads)
		fprintf (stderr, "\nChild terminated with signal = %x \n",
			 WTERMSIG (w));
	    }

	  return pid_to_ptid (pid);
	}
    }
  else
    {
      if (!WIFSTOPPED (w))
	goto retry;
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any reinsert (software
	 single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      if (bp_explains_trap)
	{
	  /* If we stepped or ran into an internal breakpoint, we've
	     already handled it.  So next time we resume (from this
	     PC), we should step over it.  */
	  if (debug_threads)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");

	  event_child->need_step_over = 1;
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* Check if GDB would be interested in this event.  If GDB wanted
     this thread to single step, we always want to report the SIGTRAP,
     and let GDB handle it.  */
  report_to_gdb = (!maybe_internal_trap
		   || event_child->last_resume_kind == resume_step
		   || event_child->stopped_by_watchpoint
		   || (!step_over_finished && !bp_explains_trap));

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    fprintf (stderr, "Step-over finished.\n");
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (the_low_target.set_pc != NULL)
	{
	  struct regcache *regcache
	    = get_thread_regcache (get_lwp_thread (event_child), 1);
	  (*the_low_target.set_pc) (regcache, event_child->stop_pc);
	}

      /* We've finished stepping over a breakpoint.  We've stopped all
	 LWPs momentarily except the stepping one.  This is where we
	 resume them all again.  We're going to keep waiting, so use
	 proceed, which handles stepping over the next breakpoint.  */
      if (debug_threads)
	fprintf (stderr, "proceeding all threads.\n");
      proceed_all_lwps ();
      goto retry;
    }

  if (debug_threads)
    {
      if (event_child->last_resume_kind == resume_step)
	fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
      if (event_child->stopped_by_watchpoint)
	fprintf (stderr, "Stopped by watchpoint.\n");
      fprintf (stderr, "Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  if (!non_stop)
    {
      /* In all-stop, stop all threads.  */
      stop_all_lwps ();

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid_equal (ptid, minus_one_ptid))
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}

      /* Now that we've selected our final event LWP, cancel any
	 breakpoints in other LWPs that have hit a GDB breakpoint.
	 See the comment in cancel_breakpoints_callback to find out
	 why.  */
      find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (event_child);
    }

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Do this before the gdb_wants_all_stopped calls below, since they
     always set last_resume_kind to resume_stop.  */
  if (event_child->last_resume_kind == resume_stop && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = TARGET_SIGNAL_0;
    }
  else if (event_child->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (ptid_equal (step_over_bkpt, null_ptid));

  if (!non_stop)
    {
      /* From GDB's perspective, all-stop mode always stops all
	 threads implicitly.  Tag all threads as "want-stopped".  */
      gdb_wants_all_stopped ();
    }
  else
    {
      /* We're reporting this LWP as stopped.  Update its
	 "want-stopped" state to what the client wants, until it gets
	 a new resume action.  */
      gdb_wants_lwp_stopped (&event_child->head);
    }
1883
1884 if (debug_threads)
1885 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
1886 target_pid_to_str (ptid_of (event_child)),
1887 ourstatus->kind,
1888 ourstatus->value.sig);
1889
1890 get_lwp_thread (event_child)->last_status = *ourstatus;
1891 return ptid_of (event_child);
1892 }
1893
1894 /* Get rid of any pending event in the pipe. */
1895 static void
1896 async_file_flush (void)
1897 {
1898 int ret;
1899 char buf;
1900
1901 do
1902 ret = read (linux_event_pipe[0], &buf, 1);
1903 while (ret >= 0 || (ret == -1 && errno == EINTR));
1904 }
1905
1906 /* Put something in the pipe, so the event loop wakes up. */
1907 static void
1908 async_file_mark (void)
1909 {
1910 int ret;
1911
1912 async_file_flush ();
1913
1914 do
1915 ret = write (linux_event_pipe[1], "+", 1);
1916 while (ret == 0 || (ret == -1 && errno == EINTR));
1917
1918 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1919 be awakened anyway. */
1920 }
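
/* Taken together, async_file_flush and async_file_mark implement
   the classic self-pipe pattern: a writer (e.g. the SIGCHLD handler
   below) puts a byte into linux_event_pipe[1], and the event loop,
   which watches linux_event_pipe[0] via add_file_handler (see
   linux_async), wakes up and calls back into linux_wait.  Both ends
   are set O_NONBLOCK, so neither helper can block.  */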
1921
1922 static ptid_t
1923 linux_wait (ptid_t ptid,
1924 struct target_waitstatus *ourstatus, int target_options)
1925 {
1926 ptid_t event_ptid;
1927
1928 if (debug_threads)
1929 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
1930
1931 /* Flush the async file first. */
1932 if (target_is_async_p ())
1933 async_file_flush ();
1934
1935 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
1936
1937 /* If at least one stop was reported, there may be more. A single
1938 SIGCHLD can signal more than one child stop. */
1939 if (target_is_async_p ()
1940 && (target_options & TARGET_WNOHANG) != 0
1941 && !ptid_equal (event_ptid, null_ptid))
1942 async_file_mark ();
1943
1944 return event_ptid;
1945 }
1946
1947 /* Send a signal to an LWP. */
1948
1949 static int
1950 kill_lwp (unsigned long lwpid, int signo)
1951 {
1952 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1953 fails, then we are not using nptl threads and we should be using kill. */
1954
1955 #ifdef __NR_tkill
1956 {
1957 static int tkill_failed;
1958
1959 if (!tkill_failed)
1960 {
1961 int ret;
1962
1963 errno = 0;
1964 ret = syscall (__NR_tkill, lwpid, signo);
1965 if (errno != ENOSYS)
1966 return ret;
1967 tkill_failed = 1;
1968 }
1969 }
1970 #endif
1971
1972 return kill (lwpid, signo);
1973 }
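
/* An illustrative aside, not used by the code above: kernels that
   provide tkill generally also provide tgkill, which takes the
   thread-group ID as well and so cannot signal a recycled TID from
   another process:

     ret = syscall (__NR_tgkill, getpid (), lwpid, signo);

   The tkill-then-kill fallback above keeps working on kernels that
   predate both.  */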
1974
1975 static void
1976 send_sigstop (struct inferior_list_entry *entry)
1977 {
1978 struct lwp_info *lwp = (struct lwp_info *) entry;
1979 int pid;
1980
1981 if (lwp->stopped)
1982 return;
1983
1984 pid = lwpid_of (lwp);
1985
1986 /* If we already have a pending stop signal for this process, don't
1987 send another. */
1988 if (lwp->stop_expected)
1989 {
1990 if (debug_threads)
1991 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
1992
1993 return;
1994 }
1995
1996 if (debug_threads)
1997 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
1998
1999 lwp->stop_expected = 1;
2000 kill_lwp (pid, SIGSTOP);
2001 }
2002
2003 static void
2004 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2005 {
2006 /* It's dead, really. */
2007 lwp->dead = 1;
2008
2009 /* Store the exit status for later. */
2010 lwp->status_pending_p = 1;
2011 lwp->status_pending = wstat;
2012
2013 /* Prevent trying to stop it. */
2014 lwp->stopped = 1;
2015
2016 /* No further stops are expected from a dead lwp. */
2017 lwp->stop_expected = 0;
2018 }
2019
2020 static void
2021 wait_for_sigstop (struct inferior_list_entry *entry)
2022 {
2023 struct lwp_info *lwp = (struct lwp_info *) entry;
2024 struct thread_info *saved_inferior;
2025 int wstat;
2026 ptid_t saved_tid;
2027 ptid_t ptid;
2028 int pid;
2029
2030 if (lwp->stopped)
2031 {
2032 if (debug_threads)
2033 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2034 lwpid_of (lwp));
2035 return;
2036 }
2037
2038 saved_inferior = current_inferior;
2039 if (saved_inferior != NULL)
2040 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2041 else
2042 saved_tid = null_ptid; /* avoid bogus unused warning */
2043
2044 ptid = lwp->head.id;
2045
2046 if (debug_threads)
2047 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2048
2049 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2050
2051 /* If we stopped with a non-SIGSTOP signal, save it for later
2052 and record the pending SIGSTOP. If the process exited, just
2053 return. */
2054 if (WIFSTOPPED (wstat))
2055 {
2056 if (debug_threads)
2057 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2058 lwpid_of (lwp), WSTOPSIG (wstat));
2059
2060 if (WSTOPSIG (wstat) != SIGSTOP)
2061 {
2062 if (debug_threads)
2063 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2064 lwpid_of (lwp), wstat);
2065
2066 lwp->status_pending_p = 1;
2067 lwp->status_pending = wstat;
2068 }
2069 }
2070 else
2071 {
2072 if (debug_threads)
2073 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2074
2075 lwp = find_lwp_pid (pid_to_ptid (pid));
2076 if (lwp)
2077 {
2078 /* Leave this status pending for the next time we're able to
2079 report it. In the meantime, we'll report this lwp as
2080 dead to GDB, so GDB doesn't try to read registers and
2081 memory from it. This can only happen if this was the
2082 last thread of the process; otherwise, PID is removed
2083 from the thread tables before linux_wait_for_event
2084 returns. */
2085 mark_lwp_dead (lwp, wstat);
2086 }
2087 }
2088
2089 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2090 current_inferior = saved_inferior;
2091 else
2092 {
2093 if (debug_threads)
2094 fprintf (stderr, "Previously current thread died.\n");
2095
2096 if (non_stop)
2097 {
2098 /* We can't change the current inferior behind GDB's back,
2099 otherwise, a subsequent command may apply to the wrong
2100 process. */
2101 current_inferior = NULL;
2102 }
2103 else
2104 {
2105 /* Set a valid thread as current. */
2106 set_desired_inferior (0);
2107 }
2108 }
2109 }
2110
2111 static void
2112 stop_all_lwps (void)
2113 {
2114 stopping_threads = 1;
2115 for_each_inferior (&all_lwps, send_sigstop);
2116 for_each_inferior (&all_lwps, wait_for_sigstop);
2117 stopping_threads = 0;
2118 }
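
/* Note that stop_all_lwps signals every LWP first and only then
   waits for each one: the SIGSTOPs are in flight in parallel, so we
   pay roughly one stop latency in total instead of one signal/wait
   round-trip per LWP.  */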
2119
2120 /* Resume execution of the inferior process.
2121 If STEP is nonzero, single-step it.
2122 If SIGNAL is nonzero, give it that signal. */
2123
2124 static void
2125 linux_resume_one_lwp (struct lwp_info *lwp,
2126 int step, int signal, siginfo_t *info)
2127 {
2128 struct thread_info *saved_inferior;
2129
2130 if (lwp->stopped == 0)
2131 return;
2132
2133 /* If we have pending signals or status, and a new signal, enqueue the
2134 signal. Also enqueue the signal if we are waiting to reinsert a
2135 breakpoint; it will be picked up again below. */
2136 if (signal != 0
2137 && (lwp->status_pending_p || lwp->pending_signals != NULL
2138 || lwp->bp_reinsert != 0))
2139 {
2140 struct pending_signals *p_sig;
2141 p_sig = xmalloc (sizeof (*p_sig));
2142 p_sig->prev = lwp->pending_signals;
2143 p_sig->signal = signal;
2144 if (info == NULL)
2145 memset (&p_sig->info, 0, sizeof (siginfo_t));
2146 else
2147 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2148 lwp->pending_signals = p_sig;
2149 }
2150
2151 if (lwp->status_pending_p)
2152 {
2153 if (debug_threads)
2154 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2155 " has pending status\n",
2156 lwpid_of (lwp), step ? "step" : "continue", signal,
2157 lwp->stop_expected ? "expected" : "not expected");
2158 return;
2159 }
2160
2161 saved_inferior = current_inferior;
2162 current_inferior = get_lwp_thread (lwp);
2163
2164 if (debug_threads)
2165 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2166 lwpid_of (lwp), step ? "step" : "continue", signal,
2167 lwp->stop_expected ? "expected" : "not expected");
2168
2169 /* This bit needs some thinking about. If we get a signal that
2170 we must report while a single-step reinsert is still pending,
2171 we often end up resuming the thread. It might be better to
2172 (ew) allow a stack of pending events; then we could be sure that
2173 the reinsert happened right away and not lose any signals.
2174
2175 Making this stack would also shrink the window in which breakpoints are
2176 uninserted (see comment in linux_wait_for_lwp) but not enough for
2177 complete correctness, so it won't solve that problem. It may be
2178 worthwhile just to solve this one, however. */
2179 if (lwp->bp_reinsert != 0)
2180 {
2181 if (debug_threads)
2182 fprintf (stderr, " pending reinsert at 0x%s\n",
2183 paddress (lwp->bp_reinsert));
2184
2185 if (can_hardware_single_step ())
2186 {
2187 if (step == 0)
2188 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2189
2190 step = 1;
2191 }
2192
2193 /* Postpone any pending signal. It was enqueued above. */
2194 signal = 0;
2195 }
2196
2197 if (debug_threads && the_low_target.get_pc != NULL)
2198 {
2199 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2200 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
2201 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
2202 }
2203
2204 /* If we have pending signals, consume one unless we are trying to reinsert
2205 a breakpoint. */
2206 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
2207 {
2208 struct pending_signals **p_sig;
2209
2210 p_sig = &lwp->pending_signals;
2211 while ((*p_sig)->prev != NULL)
2212 p_sig = &(*p_sig)->prev;
2213
2214 signal = (*p_sig)->signal;
2215 if ((*p_sig)->info.si_signo != 0)
2216 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
2217
2218 free (*p_sig);
2219 *p_sig = NULL;
2220 }
2221
2222 if (the_low_target.prepare_to_resume != NULL)
2223 the_low_target.prepare_to_resume (lwp);
2224
2225 regcache_invalidate_one ((struct inferior_list_entry *)
2226 get_lwp_thread (lwp));
2227 errno = 0;
2228 lwp->stopped = 0;
2229 lwp->stopped_by_watchpoint = 0;
2230 lwp->stepping = step;
2231 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
2232 /* Coerce to a uintptr_t first to avoid potential gcc warning
2233 of coercing an 8 byte integer to a 4 byte pointer. */
2234 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
2235
2236 current_inferior = saved_inferior;
2237 if (errno)
2238 {
2239 /* ESRCH from ptrace either means that the thread was already
2240 running (an error) or that it is gone (a race condition). If
2241 it's gone, we will get a notification the next time we wait,
2242 so we can ignore the error. We could differentiate these
2243 two, but it's tricky without waiting; the thread still exists
2244 as a zombie, so sending it signal 0 would succeed. So just
2245 ignore ESRCH. */
2246 if (errno == ESRCH)
2247 return;
2248
2249 perror_with_name ("ptrace");
2250 }
2251 }
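
/* A note on the pending-signal list handled above: entries are
   pushed at the head (p_sig->prev points at the previous head),
   while the dequeue loop walks the ->prev chain to the tail, i.e.
   the oldest entry.  So although the list is stored newest-first,
   signals are delivered oldest-first.  For example, after enqueuing
   SIGUSR1 and then SIGUSR2:

     lwp->pending_signals -> SIGUSR2 -> SIGUSR1 -> NULL

   and SIGUSR1 is the next signal consumed.  */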
2252
2253 struct thread_resume_array
2254 {
2255 struct thread_resume *resume;
2256 size_t n;
2257 };
2258
2259 /* This function is called once per thread. We look up the thread
2260 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2261 resume request.
2262
2263 This algorithm is O(threads * resume elements), but the number of
2264 resume elements is small (and will remain small at least until GDB
2265 supports thread suspension). */
2266 static int
2267 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
2268 {
2269 struct lwp_info *lwp;
2270 struct thread_info *thread;
2271 int ndx;
2272 struct thread_resume_array *r;
2273
2274 thread = (struct thread_info *) entry;
2275 lwp = get_thread_lwp (thread);
2276 r = arg;
2277
2278 for (ndx = 0; ndx < r->n; ndx++)
2279 {
2280 ptid_t ptid = r->resume[ndx].thread;
2281 if (ptid_equal (ptid, minus_one_ptid)
2282 || ptid_equal (ptid, entry->id)
2283 || (ptid_is_pid (ptid)
2284 && (ptid_get_pid (ptid) == pid_of (lwp)))
2285 || (ptid_get_lwp (ptid) == -1
2286 && (ptid_get_pid (ptid) == pid_of (lwp))))
2287 {
2288 if (r->resume[ndx].kind == resume_stop
2289 && lwp->last_resume_kind == resume_stop)
2290 {
2291 if (debug_threads)
2292 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
2293 thread->last_status.kind == TARGET_WAITKIND_STOPPED
2294 ? "stopped"
2295 : "stopping",
2296 lwpid_of (lwp));
2297
2298 continue;
2299 }
2300
2301 lwp->resume = &r->resume[ndx];
2302 lwp->last_resume_kind = lwp->resume->kind;
2303 return 0;
2304 }
2305 }
2306
2307 /* No resume action for this thread. */
2308 lwp->resume = NULL;
2309
2310 return 0;
2311 }
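
/* Some matching examples for the loop above, for LWP 1234 of
   process 1000:

     minus_one_ptid          matches every thread;
     ptid (1000, 1234, 0)    matches exactly this LWP;
     ptid (1000, 0, 0) and
     ptid (1000, -1, 0)      match every LWP of process 1000.

   The first entry of R->RESUME[] that matches wins.  */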
2312
2313
2314 /* Set *FLAG_P if this lwp has an interesting status pending. */
2315 static int
2316 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
2317 {
2318 struct lwp_info *lwp = (struct lwp_info *) entry;
2319
2320 /* LWPs which will not be resumed are not interesting, because
2321 we might not wait for them next time through linux_wait. */
2322 if (lwp->resume == NULL)
2323 return 0;
2324
2325 if (lwp->status_pending_p)
2326 * (int *) flag_p = 1;
2327
2328 return 0;
2329 }
2330
2331 /* Return 1 if this lwp that GDB wants running is stopped at an
2332 internal breakpoint that we need to step over. It assumes that any
2333 required STOP_PC adjustment has already been propagated to the
2334 inferior's regcache. */
2335
2336 static int
2337 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
2338 {
2339 struct lwp_info *lwp = (struct lwp_info *) entry;
2340 struct thread_info *saved_inferior;
2341 CORE_ADDR pc;
2342
2343 /* LWPs which will not be resumed are not interesting, because we
2344 might not wait for them next time through linux_wait. */
2345
2346 if (!lwp->stopped)
2347 {
2348 if (debug_threads)
2349 fprintf (stderr,
2350 "Need step over [LWP %ld]? Ignoring, not stopped\n",
2351 lwpid_of (lwp));
2352 return 0;
2353 }
2354
2355 if (lwp->last_resume_kind == resume_stop)
2356 {
2357 if (debug_threads)
2358 fprintf (stderr,
2359 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
2360 lwpid_of (lwp));
2361 return 0;
2362 }
2363
2364 if (!lwp->need_step_over)
2365 {
2366 if (debug_threads)
2367 fprintf (stderr,
2368 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
2369 }
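
/* Note that there is no early return above: NEED_STEP_OVER is
   treated as a hint only, and the breakpoint_here check further
   down is what actually decides.  */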
2370
2371 if (lwp->status_pending_p)
2372 {
2373 if (debug_threads)
2374 fprintf (stderr,
2375 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
2376 lwpid_of (lwp));
2377 return 0;
2378 }
2379
2380 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
2381 or we have. */
2382 pc = get_pc (lwp);
2383
2384 /* If the PC has changed since we stopped, then don't do anything,
2385 and let the breakpoint/tracepoint be hit. This happens if, for
2386 instance, GDB handled the decr_pc_after_break subtraction itself,
2387 GDB is OOL stepping this thread, or the user has issued a "jump"
2388 command, or poked the thread's registers herself. */
2389 if (pc != lwp->stop_pc)
2390 {
2391 if (debug_threads)
2392 fprintf (stderr,
2393 "Need step over [LWP %ld]? Cancelling, PC was changed. "
2394 "Old stop_pc was 0x%s, PC is now 0x%s\n",
2395 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
2396
2397 lwp->need_step_over = 0;
2398 return 0;
2399 }
2400
2401 saved_inferior = current_inferior;
2402 current_inferior = get_lwp_thread (lwp);
2403
2404 /* We only step over our breakpoints. */
2405 if (breakpoint_here (pc))
2406 {
2407 if (debug_threads)
2408 fprintf (stderr,
2409 "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
2410 lwpid_of (lwp), paddress (pc));
2411
2412 /* We've found an lwp that needs stepping over --- return 1 so
2413 that find_inferior stops looking. */
2414 current_inferior = saved_inferior;
2415
2416 /* If the step over is cancelled, this is set again. */
2417 lwp->need_step_over = 0;
2418 return 1;
2419 }
2420
2421 current_inferior = saved_inferior;
2422
2423 if (debug_threads)
2424 fprintf (stderr,
2425 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
2426 lwpid_of (lwp), paddress (pc));
2427
2428 return 0;
2429 }
2430
2431 /* Start a step-over operation on LWP. When LWP stopped at a
2432 breakpoint, to make progress we need to get the breakpoint out
2433 of the way. If we let other threads run while we do that, they may
2434 pass by the breakpoint location and miss hitting it. To avoid
2435 that, a step-over momentarily stops all threads while LWP is
2436 single-stepped with the breakpoint temporarily uninserted from
2437 the inferior. When the single-step finishes, we reinsert the
2438 breakpoint and let all threads that are supposed to be running
2439 run again.
2440
2441 On targets that don't support hardware single-step, we don't
2442 currently support full software single-stepping. Instead, we only
2443 support stepping over the thread event breakpoint, by asking the
2444 low target where to place a reinsert breakpoint. Since the
2445 breakpoint being stepped over is assumed to be a thread event
2446 breakpoint, the low target typically picks the return address
2447 of the current function as the reinsert address. */
2448
2449 static int
2450 start_step_over (struct lwp_info *lwp)
2451 {
2452 struct thread_info *saved_inferior;
2453 CORE_ADDR pc;
2454 int step;
2455
2456 if (debug_threads)
2457 fprintf (stderr,
2458 "Starting step-over on LWP %ld. Stopping all threads\n",
2459 lwpid_of (lwp));
2460
2461 stop_all_lwps ();
2462
2463 if (debug_threads)
2464 fprintf (stderr, "Done stopping all threads for step-over.\n");
2465
2466 /* Note, we should always reach here with an already adjusted PC,
2467 either by GDB (if we're resuming due to GDB's request), or by our
2468 caller, if we just finished handling an internal breakpoint GDB
2469 shouldn't care about. */
2470 pc = get_pc (lwp);
2471
2472 saved_inferior = current_inferior;
2473 current_inferior = get_lwp_thread (lwp);
2474
2475 lwp->bp_reinsert = pc;
2476 uninsert_breakpoints_at (pc);
2477
2478 if (can_hardware_single_step ())
2479 {
2480 step = 1;
2481 }
2482 else
2483 {
2484 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
2485 set_reinsert_breakpoint (raddr);
2486 step = 0;
2487 }
2488
2489 current_inferior = saved_inferior;
2490
2491 linux_resume_one_lwp (lwp, step, 0, NULL);
2492
2493 /* Require next event from this LWP. */
2494 step_over_bkpt = lwp->head.id;
2495 return 1;
2496 }
2497
2498 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
2499 start_step_over, if still there, and delete any reinsert
2500 breakpoints we've set, on non hardware single-step targets. */
2501
2502 static int
2503 finish_step_over (struct lwp_info *lwp)
2504 {
2505 if (lwp->bp_reinsert != 0)
2506 {
2507 if (debug_threads)
2508 fprintf (stderr, "Finished step over.\n");
2509
2510 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
2511 may be no breakpoint to reinsert there by now. */
2512 reinsert_breakpoints_at (lwp->bp_reinsert);
2513
2514 lwp->bp_reinsert = 0;
2515
2516 /* Delete any software-single-step reinsert breakpoints. No
2517 longer needed. We don't have to worry about other threads
2518 hitting this trap, and later not being able to explain it,
2519 because we were stepping over a breakpoint, and we hold all
2520 threads but LWP stopped while doing that. */
2521 if (!can_hardware_single_step ())
2522 delete_reinsert_breakpoints ();
2523
2524 step_over_bkpt = null_ptid;
2525 return 1;
2526 }
2527 else
2528 return 0;
2529 }
2530
2531 /* This function is called once per thread. We check the thread's resume
2532 request, which will tell us whether to resume, step, or leave the thread
2533 stopped; and what signal, if any, it should be sent.
2534
2535 For threads which we aren't explicitly told otherwise, we preserve
2536 the stepping flag; this is used for stepping over gdbserver-placed
2537 breakpoints.
2538
2539 If pending_flags was set in any thread, we queue any needed
2540 signals, since we won't actually resume. We already have a pending
2541 event to report, so we don't need to preserve any step requests;
2542 they should be re-issued if necessary. */
2543
2544 static int
2545 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
2546 {
2547 struct lwp_info *lwp;
2548 struct thread_info *thread;
2549 int step;
2550 int leave_all_stopped = * (int *) arg;
2551 int leave_pending;
2552
2553 thread = (struct thread_info *) entry;
2554 lwp = get_thread_lwp (thread);
2555
2556 if (lwp->resume == NULL)
2557 return 0;
2558
2559 if (lwp->resume->kind == resume_stop)
2560 {
2561 if (debug_threads)
2562 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
2563
2564 if (!lwp->stopped)
2565 {
2566 if (debug_threads)
2567 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
2568
2569 /* Stop the thread, and wait for the event asynchronously,
2570 through the event loop. */
2571 send_sigstop (&lwp->head);
2572 }
2573 else
2574 {
2575 if (debug_threads)
2576 fprintf (stderr, "already stopped LWP %ld\n",
2577 lwpid_of (lwp));
2578
2579 /* The LWP may have been stopped in an internal event that
2580 was not meant to be notified back to GDB (e.g., gdbserver
2581 breakpoint), so we should be reporting a stop event in
2582 this case too. */
2583
2584 /* If the thread already has a pending SIGSTOP, this is a
2585 no-op. Otherwise, something later will presumably resume
2586 the thread and this will cause it to cancel any pending
2587 operation, due to last_resume_kind == resume_stop. If
2588 the thread already has a pending status to report, we
2589 will still report it the next time we wait - see
2590 status_pending_p_callback. */
2591 send_sigstop (&lwp->head);
2592 }
2593
2594 /* For stop requests, we're done. */
2595 lwp->resume = NULL;
2596 get_lwp_thread (lwp)->last_status.kind = TARGET_WAITKIND_IGNORE;
2597 return 0;
2598 }
2599
2600 /* If this thread which is about to be resumed has a pending status,
2601 then don't resume any threads - we can just report the pending
2602 status. Make sure to queue any signals that would otherwise be
2603 sent. In all-stop mode, we base this decision on whether *any*
2604 thread has a pending status. If there's a thread that needs the
2605 step-over-breakpoint dance, then don't resume any other thread
2606 but that particular one. */
2607 leave_pending = (lwp->status_pending_p || leave_all_stopped);
2608
2609 if (!leave_pending)
2610 {
2611 if (debug_threads)
2612 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
2613
2614 step = (lwp->resume->kind == resume_step);
2615 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
2616 get_lwp_thread (lwp)->last_status.kind = TARGET_WAITKIND_IGNORE;
2617 }
2618 else
2619 {
2620 if (debug_threads)
2621 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
2622
2623 /* If we have a new signal, enqueue the signal. */
2624 if (lwp->resume->sig != 0)
2625 {
2626 struct pending_signals *p_sig;
2627 p_sig = xmalloc (sizeof (*p_sig));
2628 p_sig->prev = lwp->pending_signals;
2629 p_sig->signal = lwp->resume->sig;
2630 memset (&p_sig->info, 0, sizeof (siginfo_t));
2631
2632 /* If this is the same signal we were previously stopped by,
2633 make sure to queue its siginfo. We can ignore the return
2634 value of ptrace; if it fails, we'll skip
2635 PTRACE_SETSIGINFO. */
2636 if (WIFSTOPPED (lwp->last_status)
2637 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
2638 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
2639
2640 lwp->pending_signals = p_sig;
2641 }
2642 }
2643
2644 lwp->resume = NULL;
2645 return 0;
2646 }
2647
2648 static void
2649 linux_resume (struct thread_resume *resume_info, size_t n)
2650 {
2651 struct thread_resume_array array = { resume_info, n };
2652 struct lwp_info *need_step_over = NULL;
2653 int any_pending;
2654 int leave_all_stopped;
2655
2656 find_inferior (&all_threads, linux_set_resume_request, &array);
2657
2658 /* If there is a thread which would otherwise be resumed, which has
2659 a pending status, then don't resume any threads - we can just
2660 report the pending status. Make sure to queue any signals that
2661 would otherwise be sent. In non-stop mode, we'll apply this
2662 logic to each thread individually. We consume all pending events
2663 before considering to start a step-over (in all-stop). */
2664 any_pending = 0;
2665 if (!non_stop)
2666 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
2667
2668 /* If there is a thread which would otherwise be resumed, which is
2669 stopped at a breakpoint that needs stepping over, then don't
2670 resume any threads - have it step over the breakpoint with all
2671 other threads stopped, then resume all threads again. Make sure
2672 to queue any signals that would otherwise be delivered or
2673 queued. */
2674 if (!any_pending && supports_breakpoints ())
2675 need_step_over
2676 = (struct lwp_info *) find_inferior (&all_lwps,
2677 need_step_over_p, NULL);
2678
2679 leave_all_stopped = (need_step_over != NULL || any_pending);
2680
2681 if (debug_threads)
2682 {
2683 if (need_step_over != NULL)
2684 fprintf (stderr, "Not resuming all, need step over\n");
2685 else if (any_pending)
2686 fprintf (stderr,
2687 "Not resuming, all-stop and found "
2688 "an LWP with pending status\n");
2689 else
2690 fprintf (stderr, "Resuming, no pending status or step over needed\n");
2691 }
2692
2693 /* Even if we're leaving threads stopped, queue all signals we'd
2694 otherwise deliver. */
2695 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
2696
2697 if (need_step_over)
2698 start_step_over (need_step_over);
2699 }
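
/* To summarize the policy above: in all-stop, a pending status
   anywhere wins (report it, resume nothing); otherwise a thread
   that needs a step-over keeps every other thread stopped while it
   steps; only when neither applies does each thread get the resume
   action GDB asked for.  In non-stop, the pending-status rule is
   applied per thread instead.  */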
2700
2701 /* This function is called once per thread. We check the thread's
2702 last resume request, which will tell us whether to resume, step, or
2703 leave the thread stopped. Any signal the client requested to be
2704 delivered has already been enqueued at this point.
2705
2706 If any thread that GDB wants running is stopped at an internal
2707 breakpoint that needs stepping over, we start a step-over operation
2708 on that particular thread, and leave all others stopped. */
2709
2710 static void
2711 proceed_one_lwp (struct inferior_list_entry *entry)
2712 {
2713 struct lwp_info *lwp;
2714 int step;
2715
2716 lwp = (struct lwp_info *) entry;
2717
2718 if (debug_threads)
2719 fprintf (stderr,
2720 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
2721
2722 if (!lwp->stopped)
2723 {
2724 if (debug_threads)
2725 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
2726 return;
2727 }
2728
2729 if (lwp->last_resume_kind == resume_stop)
2730 {
2731 if (debug_threads)
2732 fprintf (stderr, " client wants LWP %ld stopped\n", lwpid_of (lwp));
2733 return;
2734 }
2735
2736 if (lwp->status_pending_p)
2737 {
2738 if (debug_threads)
2739 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
2740 lwpid_of (lwp));
2741 return;
2742 }
2743
2744 if (lwp->suspended)
2745 {
2746 if (debug_threads)
2747 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
2748 return;
2749 }
2750
2751 step = lwp->last_resume_kind == resume_step;
2752 linux_resume_one_lwp (lwp, step, 0, NULL);
2753 }
2754
2755 /* When we finish a step-over, set threads running again. If there's
2756 another thread that may need a step-over, now's the time to start
2757 it. Eventually, we'll move all threads past their breakpoints. */
2758
2759 static void
2760 proceed_all_lwps (void)
2761 {
2762 struct lwp_info *need_step_over;
2763
2764 /* If there is a thread which would otherwise be resumed, which is
2765 stopped at a breakpoint that needs stepping over, then don't
2766 resume any threads - have it step over the breakpoint with all
2767 other threads stopped, then resume all threads again. */
2768
2769 if (supports_breakpoints ())
2770 {
2771 need_step_over
2772 = (struct lwp_info *) find_inferior (&all_lwps,
2773 need_step_over_p, NULL);
2774
2775 if (need_step_over != NULL)
2776 {
2777 if (debug_threads)
2778 fprintf (stderr, "proceed_all_lwps: found "
2779 "thread %ld needing a step-over\n",
2780 lwpid_of (need_step_over));
2781
2782 start_step_over (need_step_over);
2783 return;
2784 }
2785 }
2786
2787 if (debug_threads)
2788 fprintf (stderr, "Proceeding, no step-over needed\n");
2789
2790 for_each_inferior (&all_lwps, proceed_one_lwp);
2791 }
2792
2793 /* Stopped LWPs that the client wanted to be running, that don't have
2794 pending statuses, are set to run again, except for EXCEPT, if not
2795 NULL. This undoes a stop_all_lwps call. */
2796
2797 static void
2798 unstop_all_lwps (struct lwp_info *except)
2799 {
2800 if (debug_threads)
2801 {
2802 if (except)
2803 fprintf (stderr,
2804 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
2805 else
2806 fprintf (stderr,
2807 "unstopping all lwps\n");
2808 }
2809
2810 /* Make sure proceed_one_lwp doesn't try to resume this thread. */
2811 if (except != NULL)
2812 ++except->suspended;
2813
2814 for_each_inferior (&all_lwps, proceed_one_lwp);
2815
2816 if (except != NULL)
2817 --except->suspended;
2818 }
2819
2820 #ifdef HAVE_LINUX_USRREGS
2821
2822 int
2823 register_addr (int regnum)
2824 {
2825 int addr;
2826
2827 if (regnum < 0 || regnum >= the_low_target.num_regs)
2828 error ("Invalid register number %d.", regnum);
2829
2830 addr = the_low_target.regmap[regnum];
2831
2832 return addr;
2833 }
2834
2835 /* Fetch one register. */
2836 static void
2837 fetch_register (struct regcache *regcache, int regno)
2838 {
2839 CORE_ADDR regaddr;
2840 int i, size;
2841 char *buf;
2842 int pid;
2843
2844 if (regno >= the_low_target.num_regs)
2845 return;
2846 if ((*the_low_target.cannot_fetch_register) (regno))
2847 return;
2848
2849 regaddr = register_addr (regno);
2850 if (regaddr == -1)
2851 return;
2852
2853 pid = lwpid_of (get_thread_lwp (current_inferior));
2854 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2855 & - sizeof (PTRACE_XFER_TYPE));
2856 buf = alloca (size);
2857 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2858 {
2859 errno = 0;
2860 *(PTRACE_XFER_TYPE *) (buf + i) =
2861 ptrace (PTRACE_PEEKUSER, pid,
2862 /* Coerce to a uintptr_t first to avoid potential gcc warning
2863 of coercing an 8 byte integer to a 4 byte pointer. */
2864 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
2865 regaddr += sizeof (PTRACE_XFER_TYPE);
2866 if (errno != 0)
2867 error ("reading register %d: %s", regno, strerror (errno));
2868 }
2869
2870 if (the_low_target.supply_ptrace_register)
2871 the_low_target.supply_ptrace_register (regcache, regno, buf);
2872 else
2873 supply_register (regcache, regno, buf);
2874 }
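
/* A worked example of the SIZE computation above: with
   register_size (regno) == 10 and a 4-byte PTRACE_XFER_TYPE,
   (10 + 4 - 1) & -4 == 13 & ~3 == 12, so the register is fetched
   in three 4-byte PTRACE_PEEKUSER transfers.  */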
2875
2876 /* Fetch all registers, or just one, from the child process. */
2877 static void
2878 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
2879 {
2880 if (regno == -1)
2881 for (regno = 0; regno < the_low_target.num_regs; regno++)
2882 fetch_register (regcache, regno);
2883 else
2884 fetch_register (regcache, regno);
2885 }
2886
2887 /* Store our register values back into the inferior.
2888 If REGNO is -1, do this for all registers.
2889 Otherwise, REGNO specifies which register (so we can save time). */
2890 static void
2891 usr_store_inferior_registers (struct regcache *regcache, int regno)
2892 {
2893 CORE_ADDR regaddr;
2894 int i, size;
2895 char *buf;
2896 int pid;
2897
2898 if (regno >= 0)
2899 {
2900 if (regno >= the_low_target.num_regs)
2901 return;
2902
2903 if ((*the_low_target.cannot_store_register) (regno) == 1)
2904 return;
2905
2906 regaddr = register_addr (regno);
2907 if (regaddr == -1)
2908 return;
2909 errno = 0;
2910 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2911 & - sizeof (PTRACE_XFER_TYPE);
2912 buf = alloca (size);
2913 memset (buf, 0, size);
2914
2915 if (the_low_target.collect_ptrace_register)
2916 the_low_target.collect_ptrace_register (regcache, regno, buf);
2917 else
2918 collect_register (regcache, regno, buf);
2919
2920 pid = lwpid_of (get_thread_lwp (current_inferior));
2921 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2922 {
2923 errno = 0;
2924 ptrace (PTRACE_POKEUSER, pid,
2925 /* Coerce to a uintptr_t first to avoid potential gcc warning
2926 about coercing an 8 byte integer to a 4 byte pointer. */
2927 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
2928 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
2929 if (errno != 0)
2930 {
2931 /* At this point, ESRCH should mean the process is
2932 already gone, in which case we simply ignore attempts
2933 to change its registers. See also the related
2934 comment in linux_resume_one_lwp. */
2935 if (errno == ESRCH)
2936 return;
2937
2938 if ((*the_low_target.cannot_store_register) (regno) == 0)
2939 error ("writing register %d: %s", regno, strerror (errno));
2940 }
2941 regaddr += sizeof (PTRACE_XFER_TYPE);
2942 }
2943 }
2944 else
2945 for (regno = 0; regno < the_low_target.num_regs; regno++)
2946 usr_store_inferior_registers (regcache, regno);
2947 }
2948 #endif /* HAVE_LINUX_USRREGS */
2949
2950
2951
2952 #ifdef HAVE_LINUX_REGSETS
2953
2954 static int
2955 regsets_fetch_inferior_registers (struct regcache *regcache)
2956 {
2957 struct regset_info *regset;
2958 int saw_general_regs = 0;
2959 int pid;
2960
2961 regset = target_regsets;
2962
2963 pid = lwpid_of (get_thread_lwp (current_inferior));
2964 while (regset->size >= 0)
2965 {
2966 void *buf;
2967 int res;
2968
2969 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2970 {
2971 regset ++;
2972 continue;
2973 }
2974
2975 buf = xmalloc (regset->size);
2976 #ifndef __sparc__
2977 res = ptrace (regset->get_request, pid, 0, buf);
2978 #else
2979 res = ptrace (regset->get_request, pid, buf, 0);
2980 #endif
2981 if (res < 0)
2982 {
2983 if (errno == EIO)
2984 {
2985 /* If we get EIO on a regset, do not try it again for
2986 this process. */
2987 disabled_regsets[regset - target_regsets] = 1;
2988 free (buf);
2989 continue;
2990 }
2991 else
2992 {
2993 char s[256];
2994 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
2995 pid);
2996 perror (s);
2997 }
2998 }
2999 else if (regset->type == GENERAL_REGS)
3000 saw_general_regs = 1;
3001 regset->store_function (regcache, buf);
3002 regset ++;
3003 free (buf);
3004 }
3005 if (saw_general_regs)
3006 return 0;
3007 else
3008 return 1;
3009 }
3010
3011 static int
3012 regsets_store_inferior_registers (struct regcache *regcache)
3013 {
3014 struct regset_info *regset;
3015 int saw_general_regs = 0;
3016 int pid;
3017
3018 regset = target_regsets;
3019
3020 pid = lwpid_of (get_thread_lwp (current_inferior));
3021 while (regset->size >= 0)
3022 {
3023 void *buf;
3024 int res;
3025
3026 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3027 {
3028 regset ++;
3029 continue;
3030 }
3031
3032 buf = xmalloc (regset->size);
3033
3034 /* First fill the buffer with the current register set contents,
3035 in case there are any items in the kernel's regset that are
3036 not in gdbserver's regcache. */
3037 #ifndef __sparc__
3038 res = ptrace (regset->get_request, pid, 0, buf);
3039 #else
3040 res = ptrace (regset->get_request, pid, buf, 0);
3041 #endif
3042
3043 if (res == 0)
3044 {
3045 /* Then overlay our cached registers on that. */
3046 regset->fill_function (regcache, buf);
3047
3048 /* Only now do we write the register set. */
3049 #ifndef __sparc__
3050 res = ptrace (regset->set_request, pid, 0, buf);
3051 #else
3052 res = ptrace (regset->set_request, pid, buf, 0);
3053 #endif
3054 }
3055
3056 if (res < 0)
3057 {
3058 if (errno == EIO)
3059 {
3060 /* If we get EIO on a regset, do not try it again for
3061 this process. */
3062 disabled_regsets[regset - target_regsets] = 1;
3063 free (buf);
3064 continue;
3065 }
3066 else if (errno == ESRCH)
3067 {
3068 /* At this point, ESRCH should mean the process is
3069 already gone, in which case we simply ignore attempts
3070 to change its registers. See also the related
3071 comment in linux_resume_one_lwp. */
3072 free (buf);
3073 return 0;
3074 }
3075 else
3076 {
3077 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3078 }
3079 }
3080 else if (regset->type == GENERAL_REGS)
3081 saw_general_regs = 1;
3082 regset ++;
3083 free (buf);
3084 }
3085 if (saw_general_regs)
3086 return 0;
3087 else
3088 return 1;
3090 }
3091
3092 #endif /* HAVE_LINUX_REGSETS */
3093
3094
3095 void
3096 linux_fetch_registers (struct regcache *regcache, int regno)
3097 {
3098 #ifdef HAVE_LINUX_REGSETS
3099 if (regsets_fetch_inferior_registers (regcache) == 0)
3100 return;
3101 #endif
3102 #ifdef HAVE_LINUX_USRREGS
3103 usr_fetch_inferior_registers (regcache, regno);
3104 #endif
3105 }
3106
3107 void
3108 linux_store_registers (struct regcache *regcache, int regno)
3109 {
3110 #ifdef HAVE_LINUX_REGSETS
3111 if (regsets_store_inferior_registers (regcache) == 0)
3112 return;
3113 #endif
3114 #ifdef HAVE_LINUX_USRREGS
3115 usr_store_inferior_registers (regcache, regno);
3116 #endif
3117 }
3118
3119
3120 /* Copy LEN bytes from inferior's memory starting at MEMADDR
3121 to debugger memory starting at MYADDR. */
3122
3123 static int
3124 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
3125 {
3126 register int i;
3127 /* Round starting address down to longword boundary. */
3128 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3129 /* Round ending address up; get number of longwords that makes. */
3130 register int count
3131 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
3132 / sizeof (PTRACE_XFER_TYPE);
3133 /* Allocate buffer of that many longwords. */
3134 register PTRACE_XFER_TYPE *buffer
3135 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3136 int fd;
3137 char filename[64];
3138 int pid = lwpid_of (get_thread_lwp (current_inferior));
3139
3140 /* Try using /proc. Don't bother for transfers under three words. */
3141 if (len >= 3 * sizeof (long))
3142 {
3143 /* We could keep this file open and cache it - possibly one per
3144 thread. That requires some juggling, but is even faster. */
3145 sprintf (filename, "/proc/%d/mem", pid);
3146 fd = open (filename, O_RDONLY | O_LARGEFILE);
3147 if (fd == -1)
3148 goto no_proc;
3149
3150 /* If pread64 is available, use it. It's faster if the kernel
3151 supports it (only one syscall), and it's 64-bit safe even on
3152 32-bit platforms (for instance, SPARC debugging a SPARC64
3153 application). */
3154 #ifdef HAVE_PREAD64
3155 if (pread64 (fd, myaddr, len, memaddr) != len)
3156 #else
3157 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
3158 #endif
3159 {
3160 close (fd);
3161 goto no_proc;
3162 }
3163
3164 close (fd);
3165 return 0;
3166 }
3167
3168 no_proc:
3169 /* Read all the longwords */
3170 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3171 {
3172 errno = 0;
3173 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3174 about coercing an 8 byte integer to a 4 byte pointer. */
3175 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
3176 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3177 if (errno)
3178 return errno;
3179 }
3180
3181 /* Copy appropriate bytes out of the buffer. */
3182 memcpy (myaddr,
3183 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
3184 len);
3185
3186 return 0;
3187 }
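
/* A worked example of the alignment arithmetic above: with a
   4-byte PTRACE_XFER_TYPE, memaddr == 0x1003 and len == 6, ADDR
   rounds down to 0x1000, COUNT is ((0x1009 - 0x1000) + 3) / 4 == 3
   words, and the final memcpy starts 0x1003 & 3 == 3 bytes into
   the buffer.  */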
3188
3189 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3190 memory at MEMADDR. On failure (cannot write to the inferior)
3191 returns the value of errno. */
3192
3193 static int
3194 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
3195 {
3196 register int i;
3197 /* Round starting address down to longword boundary. */
3198 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3199 /* Round ending address up; get number of longwords that makes. */
3200 register int count
3201 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
3202 /* Allocate buffer of that many longwords. */
3203 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3204 int pid = lwpid_of (get_thread_lwp (current_inferior));
3205
3206 if (debug_threads)
3207 {
3208 /* Dump up to four bytes. */
3209 unsigned int val = * (unsigned int *) myaddr;
3210 if (len == 1)
3211 val = val & 0xff;
3212 else if (len == 2)
3213 val = val & 0xffff;
3214 else if (len == 3)
3215 val = val & 0xffffff;
3216 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
3217 val, (long)memaddr);
3218 }
3219
3220 /* Fill start and end extra bytes of buffer with existing memory data. */
3221
3222 errno = 0;
3223 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3224 about coercing an 8 byte integer to a 4 byte pointer. */
3225 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
3226 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3227 if (errno)
3228 return errno;
3229
3230 if (count > 1)
3231 {
3232 errno = 0;
3233 buffer[count - 1]
3234 = ptrace (PTRACE_PEEKTEXT, pid,
3235 /* Coerce to a uintptr_t first to avoid potential gcc warning
3236 about coercing an 8 byte integer to a 4 byte pointer. */
3237 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
3238 * sizeof (PTRACE_XFER_TYPE)),
3239 0);
3240 if (errno)
3241 return errno;
3242 }
3243
3244 /* Copy data to be written over corresponding part of buffer. */
3245
3246 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
3247
3248 /* Write the entire buffer. */
3249
3250 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3251 {
3252 errno = 0;
3253 ptrace (PTRACE_POKETEXT, pid,
3254 /* Coerce to a uintptr_t first to avoid potential gcc warning
3255 about coercing an 8 byte integer to a 4 byte pointer. */
3256 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
3257 (PTRACE_ARG4_TYPE) buffer[i]);
3258 if (errno)
3259 return errno;
3260 }
3261
3262 return 0;
3263 }
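
/* The two PTRACE_PEEKTEXT calls above make this a
   read-modify-write: only whole words can be poked, so the bytes of
   the first and last word that fall outside [memaddr, memaddr + len)
   must be preserved from the inferior's existing memory before the
   whole buffer is written back.  */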
3264
3265 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
3266 static int linux_supports_tracefork_flag;
3267
3268 /* Helper functions for linux_test_for_tracefork, called via clone (). */
3269
3270 static int
3271 linux_tracefork_grandchild (void *arg)
3272 {
3273 _exit (0);
3274 }
3275
3276 #define STACK_SIZE 4096
3277
3278 static int
3279 linux_tracefork_child (void *arg)
3280 {
3281 ptrace (PTRACE_TRACEME, 0, 0, 0);
3282 kill (getpid (), SIGSTOP);
3283
3284 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3285
3286 if (fork () == 0)
3287 linux_tracefork_grandchild (NULL);
3288
3289 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3290
3291 #ifdef __ia64__
3292 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
3293 CLONE_VM | SIGCHLD, NULL);
3294 #else
3295 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
3296 CLONE_VM | SIGCHLD, NULL);
3297 #endif
3298
3299 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3300
3301 _exit (0);
3302 }
3303
3304 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
3305 sure that we can enable the option, and that it has the desired
3306 effect. */
3307
3308 static void
3309 linux_test_for_tracefork (void)
3310 {
3311 int child_pid, ret, status;
3312 long second_pid;
3313 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3314 char *stack = xmalloc (STACK_SIZE * 4);
3315 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3316
3317 linux_supports_tracefork_flag = 0;
3318
3319 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3320
3321 child_pid = fork ();
3322 if (child_pid == 0)
3323 linux_tracefork_child (NULL);
3324
3325 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3326
3327 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
3328 #ifdef __ia64__
3329 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
3330 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3331 #else /* !__ia64__ */
3332 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
3333 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3334 #endif /* !__ia64__ */
3335
3336 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3337
3338 if (child_pid == -1)
3339 perror_with_name ("clone");
3340
3341 ret = my_waitpid (child_pid, &status, 0);
3342 if (ret == -1)
3343 perror_with_name ("waitpid");
3344 else if (ret != child_pid)
3345 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
3346 if (! WIFSTOPPED (status))
3347 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
3348
3349 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
3350 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
3351 if (ret != 0)
3352 {
3353 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3354 if (ret != 0)
3355 {
3356 warning ("linux_test_for_tracefork: failed to kill child");
3357 return;
3358 }
3359
3360 ret = my_waitpid (child_pid, &status, 0);
3361 if (ret != child_pid)
3362 warning ("linux_test_for_tracefork: failed to wait for killed child");
3363 else if (!WIFSIGNALED (status))
3364 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
3365 "killed child", status);
3366
3367 return;
3368 }
3369
3370 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
3371 if (ret != 0)
3372 warning ("linux_test_for_tracefork: failed to resume child");
3373
3374 ret = my_waitpid (child_pid, &status, 0);
3375
3376 if (ret == child_pid && WIFSTOPPED (status)
3377 && status >> 16 == PTRACE_EVENT_FORK)
3378 {
3379 second_pid = 0;
3380 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
3381 if (ret == 0 && second_pid != 0)
3382 {
3383 int second_status;
3384
3385 linux_supports_tracefork_flag = 1;
3386 my_waitpid (second_pid, &second_status, 0);
3387 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
3388 if (ret != 0)
3389 warning ("linux_test_for_tracefork: failed to kill second child");
3390 my_waitpid (second_pid, &status, 0);
3391 }
3392 }
3393 else
3394 warning ("linux_test_for_tracefork: unexpected result from waitpid "
3395 "(%d, status 0x%x)", ret, status);
3396
3397 do
3398 {
3399 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3400 if (ret != 0)
3401 warning ("linux_test_for_tracefork: failed to kill child");
3402 my_waitpid (child_pid, &status, 0);
3403 }
3404 while (WIFSTOPPED (status));
3405
3406 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3407 free (stack);
3408 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3409 }
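
/* For reference, an extended ptrace event arrives as a SIGTRAP stop
   with the event code in bits 16 and up, so the fork check above is
   looking for a wait status of

     (PTRACE_EVENT_FORK << 16) | W_STOPCODE (SIGTRAP)

   i.e. 0x0001057f, given SIGTRAP == 5.  */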
3410
3411
3412 static void
3413 linux_look_up_symbols (void)
3414 {
3415 #ifdef USE_THREAD_DB
3416 struct process_info *proc = current_process ();
3417
3418 if (proc->private->thread_db != NULL)
3419 return;
3420
3421 /* If the kernel supports tracing forks then it also supports tracing
3422 clones, and then we don't need to use the magic thread event breakpoint
3423 to learn about threads. */
3424 thread_db_init (!linux_supports_tracefork_flag);
3425 #endif
3426 }
3427
3428 static void
3429 linux_request_interrupt (void)
3430 {
3431 extern unsigned long signal_pid;
3432
3433 if (!ptid_equal (cont_thread, null_ptid)
3434 && !ptid_equal (cont_thread, minus_one_ptid))
3435 {
3436 struct lwp_info *lwp;
3437 int lwpid;
3438
3439 lwp = get_thread_lwp (current_inferior);
3440 lwpid = lwpid_of (lwp);
3441 kill_lwp (lwpid, SIGINT);
3442 }
3443 else
3444 kill_lwp (signal_pid, SIGINT);
3445 }
3446
3447 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
3448 to debugger memory starting at MYADDR. */
3449
3450 static int
3451 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
3452 {
3453 char filename[PATH_MAX];
3454 int fd, n;
3455 int pid = lwpid_of (get_thread_lwp (current_inferior));
3456
3457 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
3458
3459 fd = open (filename, O_RDONLY);
3460 if (fd < 0)
3461 return -1;
3462
3463 if (offset != (CORE_ADDR) 0
3464 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3465 n = -1;
3466 else
3467 n = read (fd, myaddr, len);
3468
3469 close (fd);
3470
3471 return n;
3472 }
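
/* For reference, the data shipped above is the raw target auxv: an
   array of target-word-sized { a_type, a_val } pairs terminated by
   an AT_NULL entry.  gdbserver does not interpret it; GDB parses it
   on its side.  */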
3473
3474 /* These breakpoint and watchpoint related wrapper functions simply
3475 pass on the function call if the target has registered a
3476 corresponding function. */
3477
3478 static int
3479 linux_insert_point (char type, CORE_ADDR addr, int len)
3480 {
3481 if (the_low_target.insert_point != NULL)
3482 return the_low_target.insert_point (type, addr, len);
3483 else
3484 /* Unsupported (see target.h). */
3485 return 1;
3486 }
3487
3488 static int
3489 linux_remove_point (char type, CORE_ADDR addr, int len)
3490 {
3491 if (the_low_target.remove_point != NULL)
3492 return the_low_target.remove_point (type, addr, len);
3493 else
3494 /* Unsupported (see target.h). */
3495 return 1;
3496 }
3497
3498 static int
3499 linux_stopped_by_watchpoint (void)
3500 {
3501 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3502
3503 return lwp->stopped_by_watchpoint;
3504 }
3505
3506 static CORE_ADDR
3507 linux_stopped_data_address (void)
3508 {
3509 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3510
3511 return lwp->stopped_data_address;
3512 }
3513
3514 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3515 #if defined(__mcoldfire__)
3516 /* These should really be defined in the kernel's ptrace.h header. */
3517 #define PT_TEXT_ADDR 49*4
3518 #define PT_DATA_ADDR 50*4
3519 #define PT_TEXT_END_ADDR 51*4
3520 #endif
3521
3522 /* Under uClinux, programs are loaded at non-zero offsets, which we need
3523 to tell gdb about. */
3524
3525 static int
3526 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
3527 {
3528 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
3529 unsigned long text, text_end, data;
3530 int pid = lwpid_of (get_thread_lwp (current_inferior));
3531
3532 errno = 0;
3533
3534 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
3535 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
3536 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
3537
3538 if (errno == 0)
3539 {
3540 /* Both text and data offsets produced at compile-time (and so
3541 used by gdb) are relative to the beginning of the program,
3542 with the data segment immediately following the text segment.
3543 However, the actual runtime layout in memory may put the data
3544 somewhere else, so when we send gdb a data base-address, we
3545 use the real data base address and subtract the compile-time
3546 data base-address from it (which is just the length of the
3547 text segment). BSS immediately follows data in both
3548 cases. */
3549 *text_p = text;
3550 *data_p = data - (text_end - text);
3551
3552 return 1;
3553 }
3554 #endif
3555 return 0;
3556 }
3557 #endif
3558
3559 static int
3560 compare_ints (const void *xa, const void *xb)
3561 {
3562 int a = *(const int *)xa;
3563 int b = *(const int *)xb;
3564
3565 return a - b;
3566 }
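
/* Note: `a - b' can overflow for arbitrary ints, but it is safe
   here because the values compared are CPU core numbers, which are
   small and non-negative.  */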
3567
3568 static int *
3569 unique (int *b, int *e)
3570 {
3571 int *d = b;
3572 while (++b != e)
3573 if (*d != *b)
3574 *++d = *b;
3575 return ++d;
3576 }
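
/* A worked example: given the sorted range { 0, 0, 1, 2, 2 },
   unique compacts it in place to { 0, 1, 2, ... } and returns a
   pointer one past the last unique element, like C++'s
   std::unique.  */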
3577
3578 /* Given PID, iterates over all threads in that process.
3579
3580 Information about each thread, in a format suitable for qXfer:osdata:thread
3581 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
3582 initialized, and the caller is responsible for finishing and appending '\0'
3583 to it.
3584
3585 The list of cores that threads are running on is assigned to *CORES, if it
3586 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
3587 should free *CORES. */
3588
3589 static void
3590 list_threads (int pid, struct buffer *buffer, char **cores)
3591 {
3592 int count = 0;
3593 int allocated = 10;
3594 int *core_numbers = xmalloc (sizeof (int) * allocated);
3595 char pathname[128];
3596 DIR *dir;
3597 struct dirent *dp;
3598 struct stat statbuf;
3599
3600 sprintf (pathname, "/proc/%d/task", pid);
3601 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
3602 {
3603 dir = opendir (pathname);
3604 if (!dir)
3605 {
3606 free (core_numbers);
3607 return;
3608 }
3609
3610 while ((dp = readdir (dir)) != NULL)
3611 {
3612 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
3613
3614 if (lwp != 0)
3615 {
3616 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
3617
3618 if (core != -1)
3619 {
3620 char s[sizeof ("4294967295")];
3621 sprintf (s, "%u", core);
3622
3623 if (count == allocated)
3624 {
3625 allocated *= 2;
3626 core_numbers = xrealloc (core_numbers,
3627 sizeof (int) * allocated);
3628 }
3629 core_numbers[count++] = core;
3630 if (buffer)
3631 buffer_xml_printf (buffer,
3632 "<item>"
3633 "<column name=\"pid\">%d</column>"
3634 "<column name=\"tid\">%s</column>"
3635 "<column name=\"core\">%s</column>"
3636 "</item>", pid, dp->d_name, s);
3637 }
3638 else
3639 {
3640 if (buffer)
3641 buffer_xml_printf (buffer,
3642 "<item>"
3643 "<column name=\"pid\">%d</column>"
3644 "<column name=\"tid\">%s</column>"
3645 "</item>", pid, dp->d_name);
3646 }
3647 }
3648 }
3649 }
3650
3651 if (cores)
3652 {
3653 *cores = NULL;
3654 if (count > 0)
3655 {
3656 struct buffer buffer2;
3657 int *b;
3658 int *e;
3659 qsort (core_numbers, count, sizeof (int), compare_ints);
3660
3661 /* Remove duplicates. */
3662 b = core_numbers;
3663 e = unique (b, core_numbers + count);
3664
3665 buffer_init (&buffer2);
3666
3667 for (b = core_numbers; b != e; ++b)
3668 {
3669 char number[sizeof ("4294967295")];
3670 sprintf (number, "%u", *b);
3671 buffer_xml_printf (&buffer2, "%s%s",
3672 (b == core_numbers) ? "" : ",", number);
3673 }
3674 buffer_grow_str0 (&buffer2, "");
3675
3676 *cores = buffer_finish (&buffer2);
3677 }
3678 }
3679 free (core_numbers);
3680 }
3681
3682 static void
3683 show_process (int pid, const char *username, struct buffer *buffer)
3684 {
3685 char pathname[128];
3686 FILE *f;
3687 char cmd[MAXPATHLEN + 1];
3688
3689 sprintf (pathname, "/proc/%d/cmdline", pid);
3690
3691 if ((f = fopen (pathname, "r")) != NULL)
3692 {
3693 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
3694 if (len > 0)
3695 {
3696 char *cores = 0;
3697 int i;
3698 for (i = 0; i < len; i++)
3699 if (cmd[i] == '\0')
3700 cmd[i] = ' ';
3701 cmd[len] = '\0';
3702
3703 buffer_xml_printf (buffer,
3704 "<item>"
3705 "<column name=\"pid\">%d</column>"
3706 "<column name=\"user\">%s</column>"
3707 "<column name=\"command\">%s</column>",
3708 pid,
3709 username,
3710 cmd);
3711
3712 /* This only collects core numbers, and does not print threads. */
3713 list_threads (pid, NULL, &cores);
3714
3715 if (cores)
3716 {
3717 buffer_xml_printf (buffer,
3718 "<column name=\"cores\">%s</column>", cores);
3719 free (cores);
3720 }
3721
3722 buffer_xml_printf (buffer, "</item>");
3723 }
3724 fclose (f);
3725 }
3726 }
3727
3728 static int
3729 linux_qxfer_osdata (const char *annex,
3730 unsigned char *readbuf, unsigned const char *writebuf,
3731 CORE_ADDR offset, int len)
3732 {
3733 /* We take a snapshot of the process list when the object starts
3734 to be read. */
3735 static const char *buf;
3736 static long len_avail = -1;
3737 static struct buffer buffer;
3738 int processes = 0;
3739 int threads = 0;
3740
3741 DIR *dirp;
3742
3743 if (strcmp (annex, "processes") == 0)
3744 processes = 1;
3745 else if (strcmp (annex, "threads") == 0)
3746 threads = 1;
3747 else
3748 return 0;
3749
3750 if (!readbuf || writebuf)
3751 return 0;
3752
3753 if (offset == 0)
3754 {
3755 if (len_avail != -1 && len_avail != 0)
3756 buffer_free (&buffer);
3757 len_avail = 0;
3758 buf = NULL;
3759 buffer_init (&buffer);
3760 if (processes)
3761 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
3762 else if (threads)
3763 buffer_grow_str (&buffer, "<osdata type=\"threads\">");
3764
3765 dirp = opendir ("/proc");
3766 if (dirp)
3767 {
3768 struct dirent *dp;
3769 while ((dp = readdir (dirp)) != NULL)
3770 {
3771 struct stat statbuf;
3772 char procentry[sizeof ("/proc/4294967295")];
3773
3774 if (!isdigit (dp->d_name[0])
3775 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
3776 continue;
3777
3778 sprintf (procentry, "/proc/%s", dp->d_name);
3779 if (stat (procentry, &statbuf) == 0
3780 && S_ISDIR (statbuf.st_mode))
3781 {
3782 int pid = (int) strtoul (dp->d_name, NULL, 10);
3783
3784 if (processes)
3785 {
3786 struct passwd *entry = getpwuid (statbuf.st_uid);
3787 show_process (pid, entry ? entry->pw_name : "?", &buffer);
3788 }
3789 else if (threads)
3790 {
3791 list_threads (pid, &buffer, NULL);
3792 }
3793 }
3794 }
3795
3796 closedir (dirp);
3797 }
3798 buffer_grow_str0 (&buffer, "</osdata>\n");
3799 buf = buffer_finish (&buffer);
3800 len_avail = strlen (buf);
3801 }
3802
3803 if (offset >= len_avail)
3804 {
3805 /* Done. Get rid of the data. */
3806 buffer_free (&buffer);
3807 buf = NULL;
3808 len_avail = 0;
3809 return 0;
3810 }
3811
3812 if (len > len_avail - offset)
3813 len = len_avail - offset;
3814 memcpy (readbuf, buf + offset, len);
3815
3816 return len;
3817 }
3818
3819 /* Convert a native/host siginfo object into/from the siginfo
3820 layout of the inferior's architecture. */
3821
3822 static void
3823 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
3824 {
3825 int done = 0;
3826
3827 if (the_low_target.siginfo_fixup != NULL)
3828 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
3829
3830 /* If there was no callback, or the callback didn't do anything,
3831 then just do a straight memcpy. */
3832 if (!done)
3833 {
3834 if (direction == 1)
3835 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3836 else
3837 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3838 }
3839 }
3840
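/* Handle a qXfer:siginfo transfer: read or write the pending siginfo
of the current LWP via PTRACE_GETSIGINFO/PTRACE_SETSIGINFO, fixing up
the layout for the inferior's architecture as needed.  */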
3841 static int
3842 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
3843 unsigned const char *writebuf, CORE_ADDR offset, int len)
3844 {
3845 int pid;
3846 struct siginfo siginfo;
3847 char inf_siginfo[sizeof (struct siginfo)];
3848
3849 if (current_inferior == NULL)
3850 return -1;
3851
3852 pid = lwpid_of (get_thread_lwp (current_inferior));
3853
3854 if (debug_threads)
3855 fprintf (stderr, "%s siginfo for lwp %d.\n",
3856 readbuf != NULL ? "Reading" : "Writing",
3857 pid);
3858
3859 if (offset > sizeof (siginfo))
3860 return -1;
3861
3862 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
3863 return -1;
3864
3865 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
3866 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3867 inferior with a 64-bit GDBSERVER should look the same as debugging it
3868 with a 32-bit GDBSERVER, we need to convert it. */
3869 siginfo_fixup (&siginfo, inf_siginfo, 0);
3870
3871 if (offset + len > sizeof (siginfo))
3872 len = sizeof (siginfo) - offset;
3873
3874 if (readbuf != NULL)
3875 memcpy (readbuf, inf_siginfo + offset, len);
3876 else
3877 {
3878 memcpy (inf_siginfo + offset, writebuf, len);
3879
3880 /* Convert back to ptrace layout before flushing it out. */
3881 siginfo_fixup (&siginfo, inf_siginfo, 1);
3882
3883 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
3884 return -1;
3885 }
3886
3887 return len;
3888 }
3889
3890 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
3891 it wakes the event loop so we notice when children change state;
3892 and it serves as the handler for the sigsuspend in my_waitpid.  */
3893
3894 static void
3895 sigchld_handler (int signo)
3896 {
3897 int old_errno = errno;
3898
3899 if (debug_threads)
3900 /* fprintf is not async-signal-safe, so call write directly. */
3901 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
3902
3903 if (target_is_async_p ())
3904 async_file_mark (); /* trigger a linux_wait */
3905
3906 errno = old_errno;
3907 }
3908
3909 static int
3910 linux_supports_non_stop (void)
3911 {
3912 return 1;
3913 }
3914
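/* Enable or disable async I/O mode, returning the previous setting.
A pipe is used to wake the event loop from sigchld_handler; SIGCHLD
is blocked while the pipe and its handler are set up or torn down so
we do not race with the signal handler.  */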
3915 static int
3916 linux_async (int enable)
3917 {
3918 int previous = (linux_event_pipe[0] != -1);
3919
3920 if (previous != enable)
3921 {
3922 sigset_t mask;
3923 sigemptyset (&mask);
3924 sigaddset (&mask, SIGCHLD);
3925
3926 sigprocmask (SIG_BLOCK, &mask, NULL);
3927
3928 if (enable)
3929 {
3930 if (pipe (linux_event_pipe) == -1)
3931 fatal ("creating event pipe failed.");
3932
3933 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
3934 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
3935
3936 /* Register the event loop handler. */
3937 add_file_handler (linux_event_pipe[0],
3938 handle_target_event, NULL);
3939
3940 /* Always trigger a linux_wait. */
3941 async_file_mark ();
3942 }
3943 else
3944 {
3945 delete_file_handler (linux_event_pipe[0]);
3946
3947 close (linux_event_pipe[0]);
3948 close (linux_event_pipe[1]);
3949 linux_event_pipe[0] = -1;
3950 linux_event_pipe[1] = -1;
3951 }
3952
3953 sigprocmask (SIG_UNBLOCK, &mask, NULL);
3954 }
3955
3956 return previous;
3957 }
3958
3959 static int
3960 linux_start_non_stop (int nonstop)
3961 {
3962 /* Register or unregister from event-loop accordingly. */
3963 linux_async (nonstop);
3964 return 0;
3965 }
3966
3967 static int
3968 linux_supports_multi_process (void)
3969 {
3970 return 1;
3971 }
3972
3973
3974 /* Enumerate spufs IDs for process PID. */
3975 static int
3976 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
3977 {
3978 int pos = 0;
3979 int written = 0;
3980 char path[128];
3981 DIR *dir;
3982 struct dirent *entry;
3983
3984 sprintf (path, "/proc/%ld/fd", pid);
3985 dir = opendir (path);
3986 if (!dir)
3987 return -1;
3988
3989 rewinddir (dir);
3990 while ((entry = readdir (dir)) != NULL)
3991 {
3992 struct stat st;
3993 struct statfs stfs;
3994 int fd;
3995
3996 fd = atoi (entry->d_name);
3997 if (!fd)
3998 continue;
3999
4000 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4001 if (stat (path, &st) != 0)
4002 continue;
4003 if (!S_ISDIR (st.st_mode))
4004 continue;
4005
4006 if (statfs (path, &stfs) != 0)
4007 continue;
4008 if (stfs.f_type != SPUFS_MAGIC)
4009 continue;
4010
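/* Each spufs context shows up as one 4-byte ID; copy out only the
IDs that fall entirely within the requested [OFFSET, OFFSET+LEN)
window.  */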
4011 if (pos >= offset && pos + 4 <= offset + len)
4012 {
4013 *(unsigned int *)(buf + pos - offset) = fd;
4014 written += 4;
4015 }
4016 pos += 4;
4017 }
4018
4019 closedir (dir);
4020 return written;
4021 }
4022
4023 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4024 object type, using the /proc file system. */
4025 static int
4026 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4027 unsigned const char *writebuf,
4028 CORE_ADDR offset, int len)
4029 {
4030 long pid = lwpid_of (get_thread_lwp (current_inferior));
4031 char buf[128];
4032 int fd = 0;
4033 int ret = 0;
4034
4035 if (!writebuf && !readbuf)
4036 return -1;
4037
4038 if (!*annex)
4039 {
4040 if (!readbuf)
4041 return -1;
4042 else
4043 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4044 }
4045
4046 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4047 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
4048 if (fd < 0)
4049 return -1;
4050
4051 if (offset != 0
4052 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4053 {
4054 close (fd);
4055 return 0;
4056 }
4057
4058 if (writebuf)
4059 ret = write (fd, writebuf, (size_t) len);
4060 else
4061 ret = read (fd, readbuf, (size_t) len);
4062
4063 close (fd);
4064 return ret;
4065 }
4066
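/* Return the CPU core that thread PTID last ran on, parsed from
/proc/PID/task/TID/stat, or -1 if it cannot be determined.  */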
4067 static int
4068 linux_core_of_thread (ptid_t ptid)
4069 {
4070 char filename[sizeof ("/proc//task//stat")
4071 + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */
4072 + 1];
4073 FILE *f;
4074 char *content = NULL;
4075 char *p;
4076 char *ts = NULL;
4077 int content_read = 0;
4078 int i;
4079 int core;
4080
4081 sprintf (filename, "/proc/%d/task/%ld/stat",
4082 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4083 f = fopen (filename, "r");
4084 if (!f)
4085 return -1;
4086
4087 for (;;)
4088 {
4089 int n;
4090 content = xrealloc (content, content_read + 1024);
4091 n = fread (content + content_read, 1, 1024, f);
4092 content_read += n;
4093 if (n < 1024)
4094 {
4095 content[content_read] = '\0';
4096 break;
4097 }
4098 }
4099
4100 p = strchr (content, '(');
4101 p = strchr (p, ')') + 2; /* skip ")" and the following space. */
4102
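/* Skip to field 39 of the stat line (the CPU the task last executed
on): P starts at field 3, the first strtok_r consumes it, and 36
more calls advance to field 39.  */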
4103 p = strtok_r (p, " ", &ts);
4104 for (i = 0; i != 36; ++i)
4105 p = strtok_r (NULL, " ", &ts);
4106
4107 if (p == NULL || sscanf (p, "%d", &core) != 1)
4108 core = -1;
4109
4110 free (content);
4111 fclose (f);
4112
4113 return core;
4114 }
4115
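/* The target_ops vector exported to the rest of gdbserver; NULL
entries are operations this configuration does not provide.  */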
4116 static struct target_ops linux_target_ops = {
4117 linux_create_inferior,
4118 linux_attach,
4119 linux_kill,
4120 linux_detach,
4121 linux_join,
4122 linux_thread_alive,
4123 linux_resume,
4124 linux_wait,
4125 linux_fetch_registers,
4126 linux_store_registers,
4127 linux_read_memory,
4128 linux_write_memory,
4129 linux_look_up_symbols,
4130 linux_request_interrupt,
4131 linux_read_auxv,
4132 linux_insert_point,
4133 linux_remove_point,
4134 linux_stopped_by_watchpoint,
4135 linux_stopped_data_address,
4136 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4137 linux_read_offsets,
4138 #else
4139 NULL,
4140 #endif
4141 #ifdef USE_THREAD_DB
4142 thread_db_get_tls_address,
4143 #else
4144 NULL,
4145 #endif
4146 linux_qxfer_spu,
4147 hostio_last_error_from_errno,
4148 linux_qxfer_osdata,
4149 linux_xfer_siginfo,
4150 linux_supports_non_stop,
4151 linux_async,
4152 linux_start_non_stop,
4153 linux_supports_multi_process,
4154 #ifdef USE_THREAD_DB
4155 thread_db_handle_monitor_command,
4156 #else
4157 NULL,
4158 #endif
4159 linux_core_of_thread
4160 };
4161
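/* Make gdbserver itself ignore the thread library's internal
(cancellation) signal; see the FIXME below.  */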
4162 static void
4163 linux_init_signals (void)
4164 {
4165 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
4166 to find what the cancel signal actually is. */
4167 #ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
4168 signal (__SIGRTMIN+1, SIG_IGN);
4169 #endif
4170 }
4171
4172 void
4173 initialize_low (void)
4174 {
4175 struct sigaction sigchld_action;
4176 memset (&sigchld_action, 0, sizeof (sigchld_action));
4177 set_target_ops (&linux_target_ops);
4178 set_breakpoint_data (the_low_target.breakpoint,
4179 the_low_target.breakpoint_len);
4180 linux_init_signals ();
4181 linux_test_for_tracefork ();
4182 #ifdef HAVE_LINUX_REGSETS
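/* Count the regsets in target_regsets (the array is terminated by an
entry with a negative size) and allocate a "disabled" flag for each.  */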
4183 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
4184 ;
4185 disabled_regsets = xmalloc (num_regsets);
4186 #endif
4187
4188 sigchld_action.sa_handler = sigchld_handler;
4189 sigemptyset (&sigchld_action.sa_mask);
4190 sigchld_action.sa_flags = SA_RESTART;
4191 sigaction (SIGCHLD, &sigchld_action, NULL);
4192 }