gdb/gdbserver/linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <sys/wait.h>
24 #include <stdio.h>
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
27 #include <signal.h>
28 #include <sys/ioctl.h>
29 #include <fcntl.h>
30 #include <string.h>
31 #include <stdlib.h>
32 #include <unistd.h>
33 #include <errno.h>
34 #include <sys/syscall.h>
35 #include <sched.h>
36 #include <ctype.h>
37 #include <pwd.h>
38 #include <sys/types.h>
39 #include <dirent.h>
40 #include <sys/stat.h>
41 #include <sys/vfs.h>
42 #ifndef ELFMAG0
43 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
44 then ELFMAG0 will have been defined. If it didn't get included by
45 gdb_proc_service.h then including it will likely introduce a duplicate
46 definition of elf_fpregset_t. */
47 #include <elf.h>
48 #endif
49
50 #ifndef SPUFS_MAGIC
51 #define SPUFS_MAGIC 0x23c9b64e
52 #endif
53
54 #ifndef PTRACE_GETSIGINFO
55 # define PTRACE_GETSIGINFO 0x4202
56 # define PTRACE_SETSIGINFO 0x4203
57 #endif
58
59 #ifndef O_LARGEFILE
60 #define O_LARGEFILE 0
61 #endif
62
63 /* If the system headers did not provide the constants, hard-code the normal
64 values. */
65 #ifndef PTRACE_EVENT_FORK
66
67 #define PTRACE_SETOPTIONS 0x4200
68 #define PTRACE_GETEVENTMSG 0x4201
69
70 /* options set using PTRACE_SETOPTIONS */
71 #define PTRACE_O_TRACESYSGOOD 0x00000001
72 #define PTRACE_O_TRACEFORK 0x00000002
73 #define PTRACE_O_TRACEVFORK 0x00000004
74 #define PTRACE_O_TRACECLONE 0x00000008
75 #define PTRACE_O_TRACEEXEC 0x00000010
76 #define PTRACE_O_TRACEVFORKDONE 0x00000020
77 #define PTRACE_O_TRACEEXIT 0x00000040
78
79 /* Wait extended result codes for the above trace options. */
80 #define PTRACE_EVENT_FORK 1
81 #define PTRACE_EVENT_VFORK 2
82 #define PTRACE_EVENT_CLONE 3
83 #define PTRACE_EVENT_EXEC 4
84 #define PTRACE_EVENT_VFORK_DONE 5
85 #define PTRACE_EVENT_EXIT 6
86
87 #endif /* PTRACE_EVENT_FORK */
88
89 /* We can't always assume that this flag is available, but all systems
90 with the ptrace event handlers also have __WALL, so it's safe to use
91 in some contexts. */
92 #ifndef __WALL
93 #define __WALL 0x40000000 /* Wait for any child. */
94 #endif
95
96 #ifndef W_STOPCODE
97 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
98 #endif
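/* As an illustration (assuming a target where SIGSTOP is 19, as on x86
   GNU/Linux), W_STOPCODE (SIGSTOP) is (19 << 8) | 0x7f == 0x137f, a wait
   status that WIFSTOPPED/WSTOPSIG decode as "stopped by SIGSTOP".
   handle_extended_wait below uses it as the default stop status for a
   newly cloned child.  */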
99
100 #ifdef __UCLIBC__
101 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
102 #define HAS_NOMMU
103 #endif
104 #endif
105
106 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
107 representation of the thread ID.
108
109 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
110 the same as the LWP ID.
111
112 ``all_processes'' is keyed by the "overall process ID", which
113 GNU/Linux calls tgid, "thread group ID". */
114
115 struct inferior_list all_lwps;
116
117 /* A list of all unknown processes which receive stop signals. Some other
118 process will presumably claim each of these as forked children
119 momentarily. */
120
121 struct inferior_list stopped_pids;
122
123 /* FIXME this is a bit of a hack, and could be removed. */
124 int stopping_threads;
125
126 /* FIXME make into a target method? */
127 int using_threads = 1;
128
129 /* This flag is true iff we've just created or attached to our first
130 inferior but it has not stopped yet. As soon as it does, we need
131 to call the low target's arch_setup callback. Doing this only on
132    the first inferior avoids reinitializing the architecture on every
133 inferior, and avoids messing with the register caches of the
134 already running inferiors. NOTE: this assumes all inferiors under
135 control of gdbserver have the same architecture. */
136 static int new_inferior;
137
138 static void linux_resume_one_lwp (struct lwp_info *lwp,
139 int step, int signal, siginfo_t *info);
140 static void linux_resume (struct thread_resume *resume_info, size_t n);
141 static void stop_all_lwps (void);
142 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
143 static int check_removed_breakpoint (struct lwp_info *event_child);
144 static void *add_lwp (ptid_t ptid);
145 static int linux_stopped_by_watchpoint (void);
146 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
147 static int linux_core_of_thread (ptid_t ptid);
148
149 struct pending_signals
150 {
151 int signal;
152 siginfo_t info;
153 struct pending_signals *prev;
154 };
155
156 #define PTRACE_ARG3_TYPE void *
157 #define PTRACE_ARG4_TYPE void *
158 #define PTRACE_XFER_TYPE long
159
160 #ifdef HAVE_LINUX_REGSETS
161 static char *disabled_regsets;
162 static int num_regsets;
163 #endif
164
165 /* The read/write ends of the pipe registered as a waitable file in the
166 event loop. */
167 static int linux_event_pipe[2] = { -1, -1 };
168
169 /* True if we're currently in async mode. */
170 #define target_is_async_p() (linux_event_pipe[0] != -1)
171
172 static void send_sigstop (struct inferior_list_entry *entry);
173 static void wait_for_sigstop (struct inferior_list_entry *entry);
174
175 /* Accepts an integer PID; Returns a string representing a file that
176 can be opened to get info for the child process.
177 Space for the result is malloc'd, caller must free. */
178
179 char *
180 linux_child_pid_to_exec_file (int pid)
181 {
182 char *name1, *name2;
183
184 name1 = xmalloc (MAXPATHLEN);
185 name2 = xmalloc (MAXPATHLEN);
186 memset (name2, 0, MAXPATHLEN);
187
188 sprintf (name1, "/proc/%d/exe", pid);
189 if (readlink (name1, name2, MAXPATHLEN) > 0)
190 {
191 free (name1);
192 return name2;
193 }
194 else
195 {
196 free (name2);
197 return name1;
198 }
199 }
200
201 /* Return non-zero if HEADER is a 64-bit ELF file. */
202
203 static int
204 elf_64_header_p (const Elf64_Ehdr *header)
205 {
206 return (header->e_ident[EI_MAG0] == ELFMAG0
207 && header->e_ident[EI_MAG1] == ELFMAG1
208 && header->e_ident[EI_MAG2] == ELFMAG2
209 && header->e_ident[EI_MAG3] == ELFMAG3
210 && header->e_ident[EI_CLASS] == ELFCLASS64);
211 }
212
213 /* Return non-zero if FILE is a 64-bit ELF file,
214 zero if the file is not a 64-bit ELF file,
215 and -1 if the file is not accessible or doesn't exist. */
216
217 int
218 elf_64_file_p (const char *file)
219 {
220 Elf64_Ehdr header;
221 int fd;
222
223 fd = open (file, O_RDONLY);
224 if (fd < 0)
225 return -1;
226
227 if (read (fd, &header, sizeof (header)) != sizeof (header))
228 {
229 close (fd);
230 return 0;
231 }
232 close (fd);
233
234 return elf_64_header_p (&header);
235 }
236
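/* Remove LWP's thread from the thread list, unlink LWP from ALL_LWPS,
   and free it together with its architecture-specific data.  */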
237 static void
238 delete_lwp (struct lwp_info *lwp)
239 {
240 remove_thread (get_lwp_thread (lwp));
241 remove_inferior (&all_lwps, &lwp->head);
242 free (lwp->arch_private);
243 free (lwp);
244 }
245
246 /* Add a process to the common process list, and set its private
247 data. */
248
249 static struct process_info *
250 linux_add_process (int pid, int attached)
251 {
252 struct process_info *proc;
253
254 /* Is this the first process? If so, then set the arch. */
255 if (all_processes.head == NULL)
256 new_inferior = 1;
257
258 proc = add_process (pid, attached);
259 proc->private = xcalloc (1, sizeof (*proc->private));
260
261 if (the_low_target.new_process != NULL)
262 proc->private->arch_private = the_low_target.new_process ();
263
264 return proc;
265 }
266
267 /* Remove a process from the common process list,
268 also freeing all private data. */
269
270 static void
271 linux_remove_process (struct process_info *process)
272 {
273 struct process_info_private *priv = process->private;
274
275 free (priv->arch_private);
276 free (priv);
277 remove_process (process);
278 }
279
280 /* Wrapper function for waitpid which handles EINTR, and emulates
281 __WALL for systems where that is not available. */
282
283 static int
284 my_waitpid (int pid, int *status, int flags)
285 {
286 int ret, out_errno;
287
288 if (debug_threads)
289 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
290
291 if (flags & __WALL)
292 {
293 sigset_t block_mask, org_mask, wake_mask;
294 int wnohang;
295
296 wnohang = (flags & WNOHANG) != 0;
297 flags &= ~(__WALL | __WCLONE);
298 flags |= WNOHANG;
299
300 /* Block all signals while here. This avoids knowing about
301 	 LinuxThreads' signals.  */
302 sigfillset (&block_mask);
303 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
304
305 /* ... except during the sigsuspend below. */
306 sigemptyset (&wake_mask);
307
308 while (1)
309 {
310 /* Since all signals are blocked, there's no need to check
311 for EINTR here. */
312 ret = waitpid (pid, status, flags);
313 out_errno = errno;
314
315 if (ret == -1 && out_errno != ECHILD)
316 break;
317 else if (ret > 0)
318 break;
319
320 if (flags & __WCLONE)
321 {
322 /* We've tried both flavors now. If WNOHANG is set,
323 there's nothing else to do, just bail out. */
324 if (wnohang)
325 break;
326
327 if (debug_threads)
328 fprintf (stderr, "blocking\n");
329
330 /* Block waiting for signals. */
331 sigsuspend (&wake_mask);
332 }
333
334 flags ^= __WCLONE;
335 }
336
337 sigprocmask (SIG_SETMASK, &org_mask, NULL);
338 }
339 else
340 {
341 do
342 ret = waitpid (pid, status, flags);
343 while (ret == -1 && errno == EINTR);
344 out_errno = errno;
345 }
346
347 if (debug_threads)
348 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
349 pid, flags, status ? *status : -1, ret);
350
351 errno = out_errno;
352 return ret;
353 }
354
355 /* Handle a GNU/Linux extended wait response. If we see a clone
356 event, we need to add the new LWP to our list (and not report the
357 trap to higher layers). */
358
359 static void
360 handle_extended_wait (struct lwp_info *event_child, int wstat)
361 {
362 int event = wstat >> 16;
363 struct lwp_info *new_lwp;
364
365 if (event == PTRACE_EVENT_CLONE)
366 {
367 ptid_t ptid;
368 unsigned long new_pid;
369 int ret, status = W_STOPCODE (SIGSTOP);
370
371 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
372
373 /* If we haven't already seen the new PID stop, wait for it now. */
374 if (! pull_pid_from_list (&stopped_pids, new_pid))
375 {
376 /* The new child has a pending SIGSTOP. We can't affect it until it
377 hits the SIGSTOP, but we're already attached. */
378
379 ret = my_waitpid (new_pid, &status, __WALL);
380
381 if (ret == -1)
382 perror_with_name ("waiting for new child");
383 else if (ret != new_pid)
384 warning ("wait returned unexpected PID %d", ret);
385 else if (!WIFSTOPPED (status))
386 warning ("wait returned unexpected status 0x%x", status);
387 }
388
389 ptrace (PTRACE_SETOPTIONS, new_pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
390
391 ptid = ptid_build (pid_of (event_child), new_pid, 0);
392 new_lwp = (struct lwp_info *) add_lwp (ptid);
393 add_thread (ptid, new_lwp);
394
395 /* Either we're going to immediately resume the new thread
396 or leave it stopped. linux_resume_one_lwp is a nop if it
397 thinks the thread is currently running, so set this first
398 before calling linux_resume_one_lwp. */
399 new_lwp->stopped = 1;
400
401 /* Normally we will get the pending SIGSTOP. But in some cases
402 we might get another signal delivered to the group first.
403 If we do get another signal, be sure not to lose it. */
404 if (WSTOPSIG (status) == SIGSTOP)
405 {
406 if (! stopping_threads)
407 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
408 }
409 else
410 {
411 new_lwp->stop_expected = 1;
412 if (stopping_threads)
413 {
414 new_lwp->status_pending_p = 1;
415 new_lwp->status_pending = status;
416 }
417 else
418 /* Pass the signal on. This is what GDB does - except
419 shouldn't we really report it instead? */
420 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
421 }
422
423 /* Always resume the current thread. If we are stopping
424 threads, it will have a pending SIGSTOP; we may as well
425 collect it now. */
426 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
427 }
428 }
429
430 /* This function should only be called if the process got a SIGTRAP.
431 The SIGTRAP could mean several things.
432
433 On i386, where decr_pc_after_break is non-zero:
434 If we were single-stepping this process using PTRACE_SINGLESTEP,
435 we will get only the one SIGTRAP (even if the instruction we
436 stepped over was a breakpoint). The value of $eip will be the
437 next instruction.
438 If we continue the process using PTRACE_CONT, we will get a
439 SIGTRAP when we hit a breakpoint. The value of $eip will be
440 the instruction after the breakpoint (i.e. needs to be
441 decremented). If we report the SIGTRAP to GDB, we must also
442 report the undecremented PC. If we cancel the SIGTRAP, we
443 must resume at the decremented PC.
444
445 (Presumably, not yet tested) On a non-decr_pc_after_break machine
446 with hardware or kernel single-step:
447 If we single-step over a breakpoint instruction, our PC will
448 point at the following instruction. If we continue and hit a
449 breakpoint instruction, our PC will point at the breakpoint
450 instruction. */
451
452 static CORE_ADDR
453 get_stop_pc (void)
454 {
455 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
456 CORE_ADDR stop_pc = (*the_low_target.get_pc) (regcache);
457
458 if (! get_thread_lwp (current_inferior)->stepping)
459 stop_pc -= the_low_target.decr_pc_after_break;
460
461 if (debug_threads)
462 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
463
464 return stop_pc;
465 }
466
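/* Allocate a zeroed lwp_info for PTID, let the low target create its
   per-thread data, and add the new entry to ALL_LWPS.  */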
467 static void *
468 add_lwp (ptid_t ptid)
469 {
470 struct lwp_info *lwp;
471
472 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
473 memset (lwp, 0, sizeof (*lwp));
474
475 lwp->head.id = ptid;
476
477 if (the_low_target.new_thread != NULL)
478 lwp->arch_private = the_low_target.new_thread ();
479
480 add_inferior_to_list (&all_lwps, &lwp->head);
481
482 return lwp;
483 }
484
485 /* Start an inferior process and return its pid.
486 ALLARGS is a vector of program-name and args. */
487
488 static int
489 linux_create_inferior (char *program, char **allargs)
490 {
491 struct lwp_info *new_lwp;
492 int pid;
493 ptid_t ptid;
494
495 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
496 pid = vfork ();
497 #else
498 pid = fork ();
499 #endif
500 if (pid < 0)
501 perror_with_name ("fork");
502
503 if (pid == 0)
504 {
505 ptrace (PTRACE_TRACEME, 0, 0, 0);
506
507 #ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
508 signal (__SIGRTMIN + 1, SIG_DFL);
509 #endif
510
511 setpgid (0, 0);
512
513 execv (program, allargs);
514 if (errno == ENOENT)
515 execvp (program, allargs);
516
517 fprintf (stderr, "Cannot exec %s: %s.\n", program,
518 strerror (errno));
519 fflush (stderr);
520 _exit (0177);
521 }
522
523 linux_add_process (pid, 0);
524
525 ptid = ptid_build (pid, pid, 0);
526 new_lwp = add_lwp (ptid);
527 add_thread (ptid, new_lwp);
528 new_lwp->must_set_ptrace_flags = 1;
529
530 return pid;
531 }
532
533 /* Attach to an inferior process. */
534
535 static void
536 linux_attach_lwp_1 (unsigned long lwpid, int initial)
537 {
538 ptid_t ptid;
539 struct lwp_info *new_lwp;
540
541 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
542 {
543 if (!initial)
544 {
545 /* If we fail to attach to an LWP, just warn. */
546 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
547 strerror (errno), errno);
548 fflush (stderr);
549 return;
550 }
551 else
552 /* If we fail to attach to a process, report an error. */
553 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
554 strerror (errno), errno);
555 }
556
557 if (initial)
558     /* NOTE/FIXME: This lwp might not have been the tgid.  */
559 ptid = ptid_build (lwpid, lwpid, 0);
560 else
561 {
562 /* Note that extracting the pid from the current inferior is
563 safe, since we're always called in the context of the same
564 process as this new thread. */
565 int pid = pid_of (get_thread_lwp (current_inferior));
566 ptid = ptid_build (pid, lwpid, 0);
567 }
568
569 new_lwp = (struct lwp_info *) add_lwp (ptid);
570 add_thread (ptid, new_lwp);
571
572 /* We need to wait for SIGSTOP before being able to make the next
573 ptrace call on this LWP. */
574 new_lwp->must_set_ptrace_flags = 1;
575
576 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
577 brings it to a halt.
578
579 There are several cases to consider here:
580
581 1) gdbserver has already attached to the process and is being notified
582 of a new thread that is being created.
583 In this case we should ignore that SIGSTOP and resume the process.
584 This is handled below by setting stop_expected = 1.
585
586 2) This is the first thread (the process thread), and we're attaching
587 to it via attach_inferior.
588 In this case we want the process thread to stop.
589 This is handled by having linux_attach clear stop_expected after
590 we return.
591 ??? If the process already has several threads we leave the other
592 threads running.
593
594 3) GDB is connecting to gdbserver and is requesting an enumeration of all
595 existing threads.
596 In this case we want the thread to stop.
597 FIXME: This case is currently not properly handled.
598 We should wait for the SIGSTOP but don't. Things work apparently
599 because enough time passes between when we ptrace (ATTACH) and when
600 gdb makes the next ptrace call on the thread.
601
602 On the other hand, if we are currently trying to stop all threads, we
603 should treat the new thread as if we had sent it a SIGSTOP. This works
604 because we are guaranteed that the add_lwp call above added us to the
605 end of the list, and so the new thread has not yet reached
606 wait_for_sigstop (but will). */
607 if (! stopping_threads)
608 new_lwp->stop_expected = 1;
609 }
610
611 void
612 linux_attach_lwp (unsigned long lwpid)
613 {
614 linux_attach_lwp_1 (lwpid, 0);
615 }
616
617 int
618 linux_attach (unsigned long pid)
619 {
620 struct lwp_info *lwp;
621
622 linux_attach_lwp_1 (pid, 1);
623
624 linux_add_process (pid, 1);
625
626 if (!non_stop)
627 {
628 /* Don't ignore the initial SIGSTOP if we just attached to this
629 process. It will be collected by wait shortly. */
630 lwp = (struct lwp_info *) find_inferior_id (&all_lwps,
631 ptid_build (pid, pid, 0));
632 lwp->stop_expected = 0;
633 }
634
635 return 0;
636 }
637
638 struct counter
639 {
640 int pid;
641 int count;
642 };
643
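/* Callback for find_inferior: count threads belonging to COUNTER->pid,
   returning nonzero once a second one has been seen.  Used by
   last_thread_of_process_p below.  */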
644 static int
645 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
646 {
647 struct counter *counter = args;
648
649 if (ptid_get_pid (entry->id) == counter->pid)
650 {
651 if (++counter->count > 1)
652 return 1;
653 }
654
655 return 0;
656 }
657
658 static int
659 last_thread_of_process_p (struct thread_info *thread)
660 {
661 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
662 int pid = ptid_get_pid (ptid);
663 struct counter counter = { pid , 0 };
664
665 return (find_inferior (&all_threads,
666 second_thread_of_pid_p, &counter) == NULL);
667 }
668
669 /* Kill the inferior lwp. */
670
671 static int
672 linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
673 {
674 struct thread_info *thread = (struct thread_info *) entry;
675 struct lwp_info *lwp = get_thread_lwp (thread);
676 int wstat;
677 int pid = * (int *) args;
678
679 if (ptid_get_pid (entry->id) != pid)
680 return 0;
681
682 /* We avoid killing the first thread here, because of a Linux kernel (at
683 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
684 the children get a chance to be reaped, it will remain a zombie
685 forever. */
686
687 if (lwpid_of (lwp) == pid)
688 {
689 if (debug_threads)
690 fprintf (stderr, "lkop: is last of process %s\n",
691 target_pid_to_str (entry->id));
692 return 0;
693 }
694
695 /* If we're killing a running inferior, make sure it is stopped
696 first, as PTRACE_KILL will not work otherwise. */
697 if (!lwp->stopped)
698 send_sigstop (&lwp->head);
699
700 do
701 {
702 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
703
704 /* Make sure it died. The loop is most likely unnecessary. */
705 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
706 } while (pid > 0 && WIFSTOPPED (wstat));
707
708 return 0;
709 }
710
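/* Kill process PID: kill every LWP except the main one first (see the
   kernel zombie note in linux_kill_one_lwp), then kill the main LWP and
   drop all bookkeeping for the process.  */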
711 static int
712 linux_kill (int pid)
713 {
714 struct process_info *process;
715 struct lwp_info *lwp;
716 struct thread_info *thread;
717 int wstat;
718 int lwpid;
719
720 process = find_process_pid (pid);
721 if (process == NULL)
722 return -1;
723
724 find_inferior (&all_threads, linux_kill_one_lwp, &pid);
725
726 /* See the comment in linux_kill_one_lwp. We did not kill the first
727 thread in the list, so do so now. */
728 lwp = find_lwp_pid (pid_to_ptid (pid));
729 thread = get_lwp_thread (lwp);
730
731 if (debug_threads)
732 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
733 lwpid_of (lwp), pid);
734
735 /* If we're killing a running inferior, make sure it is stopped
736 first, as PTRACE_KILL will not work otherwise. */
737 if (!lwp->stopped)
738 send_sigstop (&lwp->head);
739
740 do
741 {
742 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
743
744 /* Make sure it died. The loop is most likely unnecessary. */
745 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
746 } while (lwpid > 0 && WIFSTOPPED (wstat));
747
748 #ifdef USE_THREAD_DB
749 thread_db_free (process, 0);
750 #endif
751 delete_lwp (lwp);
752 linux_remove_process (process);
753 return 0;
754 }
755
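/* Detach from one LWP of the process whose pid ARGS points to: stop it
   if needed, collect any expected SIGSTOP, flush its register cache, and
   PTRACE_DETACH it before dropping our record of it.  */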
756 static int
757 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
758 {
759 struct thread_info *thread = (struct thread_info *) entry;
760 struct lwp_info *lwp = get_thread_lwp (thread);
761 int pid = * (int *) args;
762
763 if (ptid_get_pid (entry->id) != pid)
764 return 0;
765
766 /* If we're detaching from a running inferior, make sure it is
767 stopped first, as PTRACE_DETACH will not work otherwise. */
768 if (!lwp->stopped)
769 {
770 int lwpid = lwpid_of (lwp);
771
772 stopping_threads = 1;
773 send_sigstop (&lwp->head);
774
775 /* If this detects a new thread through a clone event, the new
776 thread is appended to the end of the lwp list, so we'll
777 eventually detach from it. */
778 wait_for_sigstop (&lwp->head);
779 stopping_threads = 0;
780
781 /* If LWP exits while we're trying to stop it, there's nothing
782 left to do. */
783 lwp = find_lwp_pid (pid_to_ptid (lwpid));
784 if (lwp == NULL)
785 return 0;
786 }
787
788 /* Make sure the process isn't stopped at a breakpoint that's
789 no longer there. */
790 check_removed_breakpoint (lwp);
791
792 /* If this process is stopped but is expecting a SIGSTOP, then make
793 sure we take care of that now. This isn't absolutely guaranteed
794 to collect the SIGSTOP, but is fairly likely to. */
795 if (lwp->stop_expected)
796 {
797 int wstat;
798 /* Clear stop_expected, so that the SIGSTOP will be reported. */
799 lwp->stop_expected = 0;
800 if (lwp->stopped)
801 linux_resume_one_lwp (lwp, 0, 0, NULL);
802 linux_wait_for_event (lwp->head.id, &wstat, __WALL);
803 }
804
805 /* Flush any pending changes to the process's registers. */
806 regcache_invalidate_one ((struct inferior_list_entry *)
807 get_lwp_thread (lwp));
808
809 /* Finally, let it resume. */
810 ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);
811
812 delete_lwp (lwp);
813 return 0;
814 }
815
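/* Callback for find_inferior: match any thread of the process whose pid
   ARGS points to.  */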
816 static int
817 any_thread_of (struct inferior_list_entry *entry, void *args)
818 {
819 int *pid_p = args;
820
821 if (ptid_get_pid (entry->id) == *pid_p)
822 return 1;
823
824 return 0;
825 }
826
827 static int
828 linux_detach (int pid)
829 {
830 struct process_info *process;
831
832 process = find_process_pid (pid);
833 if (process == NULL)
834 return -1;
835
836 #ifdef USE_THREAD_DB
837 thread_db_free (process, 1);
838 #endif
839
840 current_inferior =
841 (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);
842
843 delete_all_breakpoints ();
844 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
845 linux_remove_process (process);
846 return 0;
847 }
848
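/* Wait until process PID exits, or until waitpid reports ECHILD, meaning
   it is no longer our child.  */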
849 static void
850 linux_join (int pid)
851 {
852 int status, ret;
853 struct process_info *process;
854
855 process = find_process_pid (pid);
856 if (process == NULL)
857 return;
858
859 do {
860 ret = my_waitpid (pid, &status, 0);
861 if (WIFEXITED (status) || WIFSIGNALED (status))
862 break;
863 } while (ret != -1 || errno != ECHILD);
864 }
865
866 /* Return nonzero if the given thread is still alive. */
867 static int
868 linux_thread_alive (ptid_t ptid)
869 {
870 struct lwp_info *lwp = find_lwp_pid (ptid);
871
872 /* We assume we always know if a thread exits. If a whole process
873 exited but we still haven't been able to report it to GDB, we'll
874 hold on to the last lwp of the dead process. */
875 if (lwp != NULL)
876 return !lwp->dead;
877 else
878 return 0;
879 }
880
881 /* Return nonzero if this process stopped at a breakpoint which
882 no longer appears to be inserted. Also adjust the PC
883 appropriately to resume where the breakpoint used to be. */
884 static int
885 check_removed_breakpoint (struct lwp_info *event_child)
886 {
887 CORE_ADDR stop_pc;
888 struct thread_info *saved_inferior;
889 struct regcache *regcache;
890
891 if (event_child->pending_is_breakpoint == 0)
892 return 0;
893
894 if (debug_threads)
895 fprintf (stderr, "Checking for breakpoint in lwp %ld.\n",
896 lwpid_of (event_child));
897
898 saved_inferior = current_inferior;
899 current_inferior = get_lwp_thread (event_child);
900 regcache = get_thread_regcache (current_inferior, 1);
901 stop_pc = get_stop_pc ();
902
903 /* If the PC has changed since we stopped, then we shouldn't do
904 anything. This happens if, for instance, GDB handled the
905 decr_pc_after_break subtraction itself. */
906 if (stop_pc != event_child->pending_stop_pc)
907 {
908 if (debug_threads)
909 fprintf (stderr, "Ignoring, PC was changed. Old PC was 0x%08llx\n",
910 event_child->pending_stop_pc);
911
912 event_child->pending_is_breakpoint = 0;
913 current_inferior = saved_inferior;
914 return 0;
915 }
916
917 /* If the breakpoint is still there, we will report hitting it. */
918 if ((*the_low_target.breakpoint_at) (stop_pc))
919 {
920 if (debug_threads)
921 fprintf (stderr, "Ignoring, breakpoint is still present.\n");
922 current_inferior = saved_inferior;
923 return 0;
924 }
925
926 if (debug_threads)
927 fprintf (stderr, "Removed breakpoint.\n");
928
929 /* For decr_pc_after_break targets, here is where we perform the
930 decrement. We go immediately from this function to resuming,
931 and can not safely call get_stop_pc () again. */
932 if (the_low_target.set_pc != NULL)
933 {
934 if (debug_threads)
935 fprintf (stderr, "Set pc to 0x%lx\n", (long) stop_pc);
936 (*the_low_target.set_pc) (regcache, stop_pc);
937 }
938
939 /* We consumed the pending SIGTRAP. */
940 event_child->pending_is_breakpoint = 0;
941 event_child->status_pending_p = 0;
942 event_child->status_pending = 0;
943
944 current_inferior = saved_inferior;
945 return 1;
946 }
947
948 /* Return 1 if this lwp has an interesting status pending. This
949 function may silently resume an inferior lwp. */
950 static int
951 status_pending_p (struct inferior_list_entry *entry, void *arg)
952 {
953 struct lwp_info *lwp = (struct lwp_info *) entry;
954 ptid_t ptid = * (ptid_t *) arg;
955
956 /* Check if we're only interested in events from a specific process
957 or its lwps. */
958 if (!ptid_equal (minus_one_ptid, ptid)
959 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
960 return 0;
961
962 if (lwp->status_pending_p && !lwp->suspended)
963 if (check_removed_breakpoint (lwp))
964 {
965 /* This thread was stopped at a breakpoint, and the breakpoint
966 is now gone. We were told to continue (or step...) all threads,
967 so GDB isn't trying to single-step past this breakpoint.
968 So instead of reporting the old SIGTRAP, pretend we got to
969 the breakpoint just after it was removed instead of just
970 before; resume the process. */
971 linux_resume_one_lwp (lwp, 0, 0, NULL);
972 return 0;
973 }
974
975 return (lwp->status_pending_p && !lwp->suspended);
976 }
977
978 static int
979 same_lwp (struct inferior_list_entry *entry, void *data)
980 {
981 ptid_t ptid = *(ptid_t *) data;
982 int lwp;
983
984 if (ptid_get_lwp (ptid) != 0)
985 lwp = ptid_get_lwp (ptid);
986 else
987 lwp = ptid_get_pid (ptid);
988
989 if (ptid_get_lwp (entry->id) == lwp)
990 return 1;
991
992 return 0;
993 }
994
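/* Return the lwp_info whose LWP id matches PTID (using PTID's pid when
   it carries no LWP id), or NULL if none is found.  */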
995 struct lwp_info *
996 find_lwp_pid (ptid_t ptid)
997 {
998 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
999 }
1000
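/* Do one waitpid round for PTID (or for any child if PTID is
   minus_one_ptid), always adding __WALL.  Record the raw status in
   *WSTATP, update the matching lwp_info, and return it; return NULL if
   WNOHANG was given and nothing had changed state.  Stops reported for
   pids we do not know yet are saved in stopped_pids so that
   handle_extended_wait can claim them later.  */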
1001 static struct lwp_info *
1002 linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
1003 {
1004 int ret;
1005 int to_wait_for = -1;
1006 struct lwp_info *child = NULL;
1007
1008 if (debug_threads)
1009 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
1010
1011 if (ptid_equal (ptid, minus_one_ptid))
1012 to_wait_for = -1; /* any child */
1013 else
1014 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
1015
1016 options |= __WALL;
1017
1018 retry:
1019
1020 ret = my_waitpid (to_wait_for, wstatp, options);
1021 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
1022 return NULL;
1023 else if (ret == -1)
1024 perror_with_name ("waitpid");
1025
1026 if (debug_threads
1027 && (!WIFSTOPPED (*wstatp)
1028 || (WSTOPSIG (*wstatp) != 32
1029 && WSTOPSIG (*wstatp) != 33)))
1030 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
1031
1032 child = find_lwp_pid (pid_to_ptid (ret));
1033
1034 /* If we didn't find a process, one of two things presumably happened:
1035 - A process we started and then detached from has exited. Ignore it.
1036 - A process we are controlling has forked and the new child's stop
1037 was reported to us by the kernel. Save its PID. */
1038 if (child == NULL && WIFSTOPPED (*wstatp))
1039 {
1040 add_pid_to_list (&stopped_pids, ret);
1041 goto retry;
1042 }
1043 else if (child == NULL)
1044 goto retry;
1045
1046 child->stopped = 1;
1047 child->pending_is_breakpoint = 0;
1048
1049 child->last_status = *wstatp;
1050
1051 /* Architecture-specific setup after inferior is running.
1052 This needs to happen after we have attached to the inferior
1053 and it is stopped for the first time, but before we access
1054 any inferior registers. */
1055 if (new_inferior)
1056 {
1057 the_low_target.arch_setup ();
1058 #ifdef HAVE_LINUX_REGSETS
1059 memset (disabled_regsets, 0, num_regsets);
1060 #endif
1061 new_inferior = 0;
1062 }
1063
1064 if (debug_threads
1065 && WIFSTOPPED (*wstatp)
1066 && the_low_target.get_pc != NULL)
1067 {
1068 struct thread_info *saved_inferior = current_inferior;
1069 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
1070 CORE_ADDR pc;
1071
1072 current_inferior = (struct thread_info *)
1073 find_inferior_id (&all_threads, child->head.id);
1074 pc = (*the_low_target.get_pc) (regcache);
1075 fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
1076 current_inferior = saved_inferior;
1077 }
1078
1079 return child;
1080 }
1081
1082 /* Wait for an event from child PID. If PID is -1, wait for any
1083 child. Store the stop status through the status pointer WSTAT.
1084 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1085 event was found and OPTIONS contains WNOHANG. Return the PID of
1086 the stopped child otherwise. */
1087
1088 static int
1089 linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
1090 {
1091 CORE_ADDR stop_pc;
1092 struct lwp_info *event_child = NULL;
1093 int bp_status;
1094 struct lwp_info *requested_child = NULL;
1095
1096 /* Check for a lwp with a pending status. */
1097 /* It is possible that the user changed the pending task's registers since
1098 it stopped. We correctly handle the change of PC if we hit a breakpoint
1099 (in check_removed_breakpoint); signals should be reported anyway. */
1100
1101 if (ptid_equal (ptid, minus_one_ptid)
1102 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
1103 {
1104 event_child = (struct lwp_info *)
1105 find_inferior (&all_lwps, status_pending_p, &ptid);
1106 if (debug_threads && event_child)
1107 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1108 }
1109 else
1110 {
1111 requested_child = find_lwp_pid (ptid);
1112 if (requested_child->status_pending_p
1113 && !check_removed_breakpoint (requested_child))
1114 event_child = requested_child;
1115 }
1116
1117 if (event_child != NULL)
1118 {
1119 if (debug_threads)
1120 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1121 lwpid_of (event_child), event_child->status_pending);
1122 *wstat = event_child->status_pending;
1123 event_child->status_pending_p = 0;
1124 event_child->status_pending = 0;
1125 current_inferior = get_lwp_thread (event_child);
1126 return lwpid_of (event_child);
1127 }
1128
1129 /* We only enter this loop if no process has a pending wait status. Thus
1130 any action taken in response to a wait status inside this loop is
1131 responding as soon as we detect the status, not after any pending
1132 events. */
1133 while (1)
1134 {
1135 event_child = linux_wait_for_lwp (ptid, wstat, options);
1136
1137 if ((options & WNOHANG) && event_child == NULL)
1138 return 0;
1139
1140 if (event_child == NULL)
1141 error ("event from unknown child");
1142
1143 current_inferior = get_lwp_thread (event_child);
1144
1145 /* Check for thread exit. */
1146 if (! WIFSTOPPED (*wstat))
1147 {
1148 if (debug_threads)
1149 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1150
1151 /* If the last thread is exiting, just return. */
1152 if (last_thread_of_process_p (current_inferior))
1153 {
1154 if (debug_threads)
1155 fprintf (stderr, "LWP %ld is last lwp of process\n",
1156 lwpid_of (event_child));
1157 return lwpid_of (event_child);
1158 }
1159
1160 delete_lwp (event_child);
1161
1162 if (!non_stop)
1163 {
1164 current_inferior = (struct thread_info *) all_threads.head;
1165 if (debug_threads)
1166 fprintf (stderr, "Current inferior is now %ld\n",
1167 lwpid_of (get_thread_lwp (current_inferior)));
1168 }
1169 else
1170 {
1171 current_inferior = NULL;
1172 if (debug_threads)
1173 fprintf (stderr, "Current inferior is now <NULL>\n");
1174 }
1175
1176 /* If we were waiting for this particular child to do something...
1177 well, it did something. */
1178 if (requested_child != NULL)
1179 return lwpid_of (event_child);
1180
1181 /* Wait for a more interesting event. */
1182 continue;
1183 }
1184
1185 if (event_child->must_set_ptrace_flags)
1186 {
1187 ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
1188 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
1189 event_child->must_set_ptrace_flags = 0;
1190 }
1191
1192 if (WIFSTOPPED (*wstat)
1193 && WSTOPSIG (*wstat) == SIGSTOP
1194 && event_child->stop_expected)
1195 {
1196 if (debug_threads)
1197 fprintf (stderr, "Expected stop.\n");
1198 event_child->stop_expected = 0;
1199 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
1200 continue;
1201 }
1202
1203 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1204 && *wstat >> 16 != 0)
1205 {
1206 handle_extended_wait (event_child, *wstat);
1207 continue;
1208 }
1209
1210 /* If GDB is not interested in this signal, don't stop other
1211 threads, and don't report it to GDB. Just resume the
1212 inferior right away. We do this for threading-related
1213 signals as well as any that GDB specifically requested we
1214 ignore. But never ignore SIGSTOP if we sent it ourselves,
1215 and do not ignore signals when stepping - they may require
1216 special handling to skip the signal handler. */
1217 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
1218 thread library? */
1219 if (WIFSTOPPED (*wstat)
1220 && !event_child->stepping
1221 && (
1222 #if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
1223 (current_process ()->private->thread_db != NULL
1224 && (WSTOPSIG (*wstat) == __SIGRTMIN
1225 || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
1226 ||
1227 #endif
1228 (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
1229 && (WSTOPSIG (*wstat) != SIGSTOP || !stopping_threads))))
1230 {
1231 siginfo_t info, *info_p;
1232
1233 if (debug_threads)
1234 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
1235 WSTOPSIG (*wstat), lwpid_of (event_child));
1236
1237 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
1238 info_p = &info;
1239 else
1240 info_p = NULL;
1241 linux_resume_one_lwp (event_child,
1242 event_child->stepping,
1243 WSTOPSIG (*wstat), info_p);
1244 continue;
1245 }
1246
1247 /* If this event was not handled above, and is not a SIGTRAP, report
1248 it. */
1249 if (!WIFSTOPPED (*wstat) || WSTOPSIG (*wstat) != SIGTRAP)
1250 return lwpid_of (event_child);
1251
1252 /* If this target does not support breakpoints, we simply report the
1253 SIGTRAP; it's of no concern to us. */
1254 if (the_low_target.get_pc == NULL)
1255 return lwpid_of (event_child);
1256
1257 stop_pc = get_stop_pc ();
1258
1259 /* bp_reinsert will only be set if we were single-stepping.
1260 Notice that we will resume the process after hitting
1261 a gdbserver breakpoint; single-stepping to/over one
1262 is not supported (yet). */
1263 if (event_child->bp_reinsert != 0)
1264 {
1265 if (debug_threads)
1266 fprintf (stderr, "Reinserted breakpoint.\n");
1267 reinsert_breakpoint (event_child->bp_reinsert);
1268 event_child->bp_reinsert = 0;
1269
1270 /* Clear the single-stepping flag and SIGTRAP as we resume. */
1271 linux_resume_one_lwp (event_child, 0, 0, NULL);
1272 continue;
1273 }
1274
1275 bp_status = check_breakpoints (stop_pc);
1276
1277 if (bp_status != 0)
1278 {
1279 if (debug_threads)
1280 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
1281
1282 /* We hit one of our own breakpoints. We mark it as a pending
1283 breakpoint, so that check_removed_breakpoint () will do the PC
1284 adjustment for us at the appropriate time. */
1285 event_child->pending_is_breakpoint = 1;
1286 event_child->pending_stop_pc = stop_pc;
1287
1288 /* We may need to put the breakpoint back. We continue in the event
1289 loop instead of simply replacing the breakpoint right away,
1290 in order to not lose signals sent to the thread that hit the
1291 breakpoint. Unfortunately this increases the window where another
1292 thread could sneak past the removed breakpoint. For the current
1293 use of server-side breakpoints (thread creation) this is
1294 acceptable; but it needs to be considered before this breakpoint
1295 mechanism can be used in more general ways. For some breakpoints
1296 it may be necessary to stop all other threads, but that should
1297 be avoided where possible.
1298
1299 If breakpoint_reinsert_addr is NULL, that means that we can
1300 use PTRACE_SINGLESTEP on this platform. Uninsert the breakpoint,
1301 mark it for reinsertion, and single-step.
1302
1303 Otherwise, call the target function to figure out where we need
1304 our temporary breakpoint, create it, and continue executing this
1305 process. */
1306
1307 /* NOTE: we're lifting breakpoints in non-stop mode. This
1308 is currently only used for thread event breakpoints, so
1309 it isn't that bad as long as we have PTRACE_EVENT_CLONE
1310 events. */
1311 if (bp_status == 2)
1312 /* No need to reinsert. */
1313 linux_resume_one_lwp (event_child, 0, 0, NULL);
1314 else if (the_low_target.breakpoint_reinsert_addr == NULL)
1315 {
1316 event_child->bp_reinsert = stop_pc;
1317 uninsert_breakpoint (stop_pc);
1318 linux_resume_one_lwp (event_child, 1, 0, NULL);
1319 }
1320 else
1321 {
1322 reinsert_breakpoint_by_bp
1323 (stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
1324 linux_resume_one_lwp (event_child, 0, 0, NULL);
1325 }
1326
1327 continue;
1328 }
1329
1330 if (debug_threads)
1331 fprintf (stderr, "Hit a non-gdbserver breakpoint.\n");
1332
1333 /* If we were single-stepping, we definitely want to report the
1334 SIGTRAP. Although the single-step operation has completed,
1335 	 do not clear the stepping flag yet; we need to check it
1336 in wait_for_sigstop. */
1337 if (event_child->stepping)
1338 return lwpid_of (event_child);
1339
1340 /* A SIGTRAP that we can't explain. It may have been a breakpoint.
1341 Check if it is a breakpoint, and if so mark the process information
1342 accordingly. This will handle both the necessary fiddling with the
1343 PC on decr_pc_after_break targets and suppressing extra threads
1344 hitting a breakpoint if two hit it at once and then GDB removes it
1345 after the first is reported. Arguably it would be better to report
1346 multiple threads hitting breakpoints simultaneously, but the current
1347 remote protocol does not allow this. */
1348 if ((*the_low_target.breakpoint_at) (stop_pc))
1349 {
1350 event_child->pending_is_breakpoint = 1;
1351 event_child->pending_stop_pc = stop_pc;
1352 }
1353
1354 return lwpid_of (event_child);
1355 }
1356
1357 /* NOTREACHED */
1358 return 0;
1359 }
1360
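/* Like linux_wait_for_event_1, but a request for a whole process (tgid)
   is turned into a wait for any child; events that belong to other
   processes are left pending on their LWPs rather than returned.  */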
1361 static int
1362 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1363 {
1364 ptid_t wait_ptid;
1365
1366 if (ptid_is_pid (ptid))
1367 {
1368 /* A request to wait for a specific tgid. This is not possible
1369 with waitpid, so instead, we wait for any child, and leave
1370 children we're not interested in right now with a pending
1371 status to report later. */
1372 wait_ptid = minus_one_ptid;
1373 }
1374 else
1375 wait_ptid = ptid;
1376
1377 while (1)
1378 {
1379 int event_pid;
1380
1381 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1382
1383 if (event_pid > 0
1384 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1385 {
1386 struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1387
1388 if (! WIFSTOPPED (*wstat))
1389 mark_lwp_dead (event_child, *wstat);
1390 else
1391 {
1392 event_child->status_pending_p = 1;
1393 event_child->status_pending = *wstat;
1394 }
1395 }
1396 else
1397 return event_pid;
1398 }
1399 }
1400
1401 /* Wait for process, returns status. */
1402
1403 static ptid_t
1404 linux_wait_1 (ptid_t ptid,
1405 struct target_waitstatus *ourstatus, int target_options)
1406 {
1407 int w;
1408 struct thread_info *thread = NULL;
1409 struct lwp_info *lwp = NULL;
1410 int options;
1411 int pid;
1412
1413 /* Translate generic target options into linux options. */
1414 options = __WALL;
1415 if (target_options & TARGET_WNOHANG)
1416 options |= WNOHANG;
1417
1418 retry:
1419 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1420
1421 /* If we were only supposed to resume one thread, only wait for
1422 that thread - if it's still alive. If it died, however - which
1423 can happen if we're coming from the thread death case below -
1424 then we need to make sure we restart the other threads. We could
1425 pick a thread at random or restart all; restarting all is less
1426 arbitrary. */
1427 if (!non_stop
1428 && !ptid_equal (cont_thread, null_ptid)
1429 && !ptid_equal (cont_thread, minus_one_ptid))
1430 {
1431 thread = (struct thread_info *) find_inferior_id (&all_threads,
1432 cont_thread);
1433
1434 /* No stepping, no signal - unless one is pending already, of course. */
1435 if (thread == NULL)
1436 {
1437 struct thread_resume resume_info;
1438 resume_info.thread = minus_one_ptid;
1439 resume_info.kind = resume_continue;
1440 resume_info.sig = 0;
1441 linux_resume (&resume_info, 1);
1442 }
1443 else
1444 ptid = cont_thread;
1445 }
1446
1447 pid = linux_wait_for_event (ptid, &w, options);
1448 if (pid == 0) /* only if TARGET_WNOHANG */
1449 return null_ptid;
1450
1451 lwp = get_thread_lwp (current_inferior);
1452
1453 /* If we are waiting for a particular child, and it exited,
1454 linux_wait_for_event will return its exit status. Similarly if
1455 the last child exited. If this is not the last child, however,
1456 do not report it as exited until there is a 'thread exited' response
1457 available in the remote protocol. Instead, just wait for another event.
1458 This should be safe, because if the thread crashed we will already
1459 have reported the termination signal to GDB; that should stop any
1460 in-progress stepping operations, etc.
1461
1462 Report the exit status of the last thread to exit. This matches
1463 LinuxThreads' behavior. */
1464
1465 if (last_thread_of_process_p (current_inferior))
1466 {
1467 if (WIFEXITED (w) || WIFSIGNALED (w))
1468 {
1469 int pid = pid_of (lwp);
1470 struct process_info *process = find_process_pid (pid);
1471
1472 #ifdef USE_THREAD_DB
1473 thread_db_free (process, 0);
1474 #endif
1475 delete_lwp (lwp);
1476 linux_remove_process (process);
1477
1478 current_inferior = NULL;
1479
1480 if (WIFEXITED (w))
1481 {
1482 ourstatus->kind = TARGET_WAITKIND_EXITED;
1483 ourstatus->value.integer = WEXITSTATUS (w);
1484
1485 if (debug_threads)
1486 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
1487 }
1488 else
1489 {
1490 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
1491 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
1492
1493 if (debug_threads)
1494 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
1495
1496 }
1497
1498 return pid_to_ptid (pid);
1499 }
1500 }
1501 else
1502 {
1503 if (!WIFSTOPPED (w))
1504 goto retry;
1505 }
1506
1507 /* In all-stop, stop all threads. Be careful to only do this if
1508 we're about to report an event to GDB. */
1509 if (!non_stop)
1510 stop_all_lwps ();
1511
1512 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1513
1514 if (lwp->suspended && WSTOPSIG (w) == SIGSTOP)
1515 {
1516 /* A thread that has been requested to stop by GDB with vCont;t,
1517 and it stopped cleanly, so report as SIG0. The use of
1518 SIGSTOP is an implementation detail. */
1519 ourstatus->value.sig = TARGET_SIGNAL_0;
1520 }
1521 else if (lwp->suspended && WSTOPSIG (w) != SIGSTOP)
1522 {
1523 /* A thread that has been requested to stop by GDB with vCont;t,
1524 	 but it stopped for other reasons.  Set stop_expected so the
1525 pending SIGSTOP is ignored and the LWP is resumed. */
1526 lwp->stop_expected = 1;
1527 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1528 }
1529 else
1530 {
1531 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1532 }
1533
1534 if (debug_threads)
1535 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
1536 target_pid_to_str (lwp->head.id),
1537 ourstatus->kind,
1538 ourstatus->value.sig);
1539
1540 return lwp->head.id;
1541 }
1542
1543 /* Get rid of any pending event in the pipe. */
1544 static void
1545 async_file_flush (void)
1546 {
1547 int ret;
1548 char buf;
1549
1550 do
1551 ret = read (linux_event_pipe[0], &buf, 1);
1552 while (ret >= 0 || (ret == -1 && errno == EINTR));
1553 }
1554
1555 /* Put something in the pipe, so the event loop wakes up. */
1556 static void
1557 async_file_mark (void)
1558 {
1559 int ret;
1560
1561 async_file_flush ();
1562
1563 do
1564 ret = write (linux_event_pipe[1], "+", 1);
1565 while (ret == 0 || (ret == -1 && errno == EINTR));
1566
1567 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1568 be awakened anyway. */
1569 }
1570
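/* Wait for an event (via linux_wait_1).  In async mode the event pipe is
   flushed first and marked again if an event was returned, since a
   single SIGCHLD may announce more than one child stop.  */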
1571 static ptid_t
1572 linux_wait (ptid_t ptid,
1573 struct target_waitstatus *ourstatus, int target_options)
1574 {
1575 ptid_t event_ptid;
1576
1577 if (debug_threads)
1578 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
1579
1580 /* Flush the async file first. */
1581 if (target_is_async_p ())
1582 async_file_flush ();
1583
1584 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
1585
1586 /* If at least one stop was reported, there may be more. A single
1587 SIGCHLD can signal more than one child stop. */
1588 if (target_is_async_p ()
1589 && (target_options & TARGET_WNOHANG) != 0
1590 && !ptid_equal (event_ptid, null_ptid))
1591 async_file_mark ();
1592
1593 return event_ptid;
1594 }
1595
1596 /* Send a signal to an LWP. */
1597
1598 static int
1599 kill_lwp (unsigned long lwpid, int signo)
1600 {
1601 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1602 fails, then we are not using nptl threads and we should be using kill. */
1603
1604 #ifdef __NR_tkill
1605 {
1606 static int tkill_failed;
1607
1608 if (!tkill_failed)
1609 {
1610 int ret;
1611
1612 errno = 0;
1613 ret = syscall (__NR_tkill, lwpid, signo);
1614 if (errno != ENOSYS)
1615 return ret;
1616 tkill_failed = 1;
1617 }
1618 }
1619 #endif
1620
1621 return kill (lwpid, signo);
1622 }
1623
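/* Ask LWP ENTRY to stop: send it a SIGSTOP unless it is already stopped
   or a SIGSTOP is already on its way.  */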
1624 static void
1625 send_sigstop (struct inferior_list_entry *entry)
1626 {
1627 struct lwp_info *lwp = (struct lwp_info *) entry;
1628 int pid;
1629
1630 if (lwp->stopped)
1631 return;
1632
1633 pid = lwpid_of (lwp);
1634
1635 /* If we already have a pending stop signal for this process, don't
1636 send another. */
1637 if (lwp->stop_expected)
1638 {
1639 if (debug_threads)
1640 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
1641
1642 /* We clear the stop_expected flag so that wait_for_sigstop
1643 will receive the SIGSTOP event (instead of silently resuming and
1644 waiting again). It'll be reset below. */
1645 lwp->stop_expected = 0;
1646 return;
1647 }
1648
1649 if (debug_threads)
1650 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
1651
1652 kill_lwp (pid, SIGSTOP);
1653 }
1654
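/* Record that LWP exited with wait status WSTAT: keep the status pending
   for later reporting, and make sure we will neither stop nor resume it
   again.  */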
1655 static void
1656 mark_lwp_dead (struct lwp_info *lwp, int wstat)
1657 {
1658 /* It's dead, really. */
1659 lwp->dead = 1;
1660
1661 /* Store the exit status for later. */
1662 lwp->status_pending_p = 1;
1663 lwp->status_pending = wstat;
1664
1665 /* So that check_removed_breakpoint doesn't try to figure out if
1666 this is stopped at a breakpoint. */
1667 lwp->pending_is_breakpoint = 0;
1668
1669 /* Prevent trying to stop it. */
1670 lwp->stopped = 1;
1671
1672 /* No further stops are expected from a dead lwp. */
1673 lwp->stop_expected = 0;
1674 }
1675
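/* Wait for LWP ENTRY to stop.  A stop with a signal other than SIGSTOP
   is saved for later (except a single-step SIGTRAP we can discard) and
   the SIGSTOP remains expected; if the process exits instead, the LWP is
   marked dead.  */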
1676 static void
1677 wait_for_sigstop (struct inferior_list_entry *entry)
1678 {
1679 struct lwp_info *lwp = (struct lwp_info *) entry;
1680 struct thread_info *saved_inferior;
1681 int wstat;
1682 ptid_t saved_tid;
1683 ptid_t ptid;
1684
1685 if (lwp->stopped)
1686 return;
1687
1688 saved_inferior = current_inferior;
1689 if (saved_inferior != NULL)
1690 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
1691 else
1692 saved_tid = null_ptid; /* avoid bogus unused warning */
1693
1694 ptid = lwp->head.id;
1695
1696 linux_wait_for_event (ptid, &wstat, __WALL);
1697
1698 /* If we stopped with a non-SIGSTOP signal, save it for later
1699 and record the pending SIGSTOP. If the process exited, just
1700 return. */
1701 if (WIFSTOPPED (wstat)
1702 && WSTOPSIG (wstat) != SIGSTOP)
1703 {
1704 if (debug_threads)
1705 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
1706 lwpid_of (lwp), wstat);
1707
1708 /* Do not leave a pending single-step finish to be reported to
1709 the client. The client will give us a new action for this
1710 thread, possibly a continue request --- otherwise, the client
1711 would consider this pending SIGTRAP reported later a spurious
1712 signal. */
1713 if (WSTOPSIG (wstat) == SIGTRAP
1714 && lwp->stepping
1715 && !linux_stopped_by_watchpoint ())
1716 {
1717 if (debug_threads)
1718 fprintf (stderr, " single-step SIGTRAP ignored\n");
1719 }
1720 else
1721 {
1722 lwp->status_pending_p = 1;
1723 lwp->status_pending = wstat;
1724 }
1725 lwp->stop_expected = 1;
1726 }
1727 else if (!WIFSTOPPED (wstat))
1728 {
1729 if (debug_threads)
1730 fprintf (stderr, "Process %ld exited while stopping LWPs\n",
1731 lwpid_of (lwp));
1732
1733 /* Leave this status pending for the next time we're able to
1734 report it. In the mean time, we'll report this lwp as dead
1735 to GDB, so GDB doesn't try to read registers and memory from
1736 it. */
1737 mark_lwp_dead (lwp, wstat);
1738 }
1739
1740 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
1741 current_inferior = saved_inferior;
1742 else
1743 {
1744 if (debug_threads)
1745 fprintf (stderr, "Previously current thread died.\n");
1746
1747 if (non_stop)
1748 {
1749 /* We can't change the current inferior behind GDB's back,
1750 otherwise, a subsequent command may apply to the wrong
1751 process. */
1752 current_inferior = NULL;
1753 }
1754 else
1755 {
1756 /* Set a valid thread as current. */
1757 set_desired_inferior (0);
1758 }
1759 }
1760 }
1761
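/* Stop every LWP: send each a SIGSTOP, then wait until each one has
   actually stopped.  stopping_threads is set around the operation so
   other code knows a global stop is in progress.  */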
1762 static void
1763 stop_all_lwps (void)
1764 {
1765 stopping_threads = 1;
1766 for_each_inferior (&all_lwps, send_sigstop);
1767 for_each_inferior (&all_lwps, wait_for_sigstop);
1768 stopping_threads = 0;
1769 }
1770
1771 /* Resume execution of the inferior process.
1772 If STEP is nonzero, single-step it.
1773 If SIGNAL is nonzero, give it that signal. */
1774
1775 static void
1776 linux_resume_one_lwp (struct lwp_info *lwp,
1777 int step, int signal, siginfo_t *info)
1778 {
1779 struct thread_info *saved_inferior;
1780
1781 if (lwp->stopped == 0)
1782 return;
1783
1784 /* If we have pending signals or status, and a new signal, enqueue the
1785 signal. Also enqueue the signal if we are waiting to reinsert a
1786 breakpoint; it will be picked up again below. */
1787 if (signal != 0
1788 && (lwp->status_pending_p || lwp->pending_signals != NULL
1789 || lwp->bp_reinsert != 0))
1790 {
1791 struct pending_signals *p_sig;
1792 p_sig = xmalloc (sizeof (*p_sig));
1793 p_sig->prev = lwp->pending_signals;
1794 p_sig->signal = signal;
1795 if (info == NULL)
1796 memset (&p_sig->info, 0, sizeof (siginfo_t));
1797 else
1798 memcpy (&p_sig->info, info, sizeof (siginfo_t));
1799 lwp->pending_signals = p_sig;
1800 }
1801
1802 if (lwp->status_pending_p && !check_removed_breakpoint (lwp))
1803 return;
1804
1805 saved_inferior = current_inferior;
1806 current_inferior = get_lwp_thread (lwp);
1807
1808 if (debug_threads)
1809 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
1810 lwpid_of (lwp), step ? "step" : "continue", signal,
1811 lwp->stop_expected ? "expected" : "not expected");
1812
1813 /* This bit needs some thinking about. If we get a signal that
1814 we must report while a single-step reinsert is still pending,
1815 we often end up resuming the thread. It might be better to
1816 (ew) allow a stack of pending events; then we could be sure that
1817 the reinsert happened right away and not lose any signals.
1818
1819 Making this stack would also shrink the window in which breakpoints are
1820 uninserted (see comment in linux_wait_for_lwp) but not enough for
1821 complete correctness, so it won't solve that problem. It may be
1822 worthwhile just to solve this one, however. */
1823 if (lwp->bp_reinsert != 0)
1824 {
1825 if (debug_threads)
1826 fprintf (stderr, " pending reinsert at %08lx", (long)lwp->bp_reinsert);
1827 if (step == 0)
1828 fprintf (stderr, "BAD - reinserting but not stepping.\n");
1829 step = 1;
1830
1831 /* Postpone any pending signal. It was enqueued above. */
1832 signal = 0;
1833 }
1834
1835 check_removed_breakpoint (lwp);
1836
1837 if (debug_threads && the_low_target.get_pc != NULL)
1838 {
1839 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
1840 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
1841 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
1842 }
1843
1844 /* If we have pending signals, consume one unless we are trying to reinsert
1845 a breakpoint. */
1846 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
1847 {
1848 struct pending_signals **p_sig;
1849
1850 p_sig = &lwp->pending_signals;
1851 while ((*p_sig)->prev != NULL)
1852 p_sig = &(*p_sig)->prev;
1853
1854 signal = (*p_sig)->signal;
1855 if ((*p_sig)->info.si_signo != 0)
1856 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
1857
1858 free (*p_sig);
1859 *p_sig = NULL;
1860 }
1861
1862 if (the_low_target.prepare_to_resume != NULL)
1863 the_low_target.prepare_to_resume (lwp);
1864
1865 regcache_invalidate_one ((struct inferior_list_entry *)
1866 get_lwp_thread (lwp));
1867 errno = 0;
1868 lwp->stopped = 0;
1869 lwp->stepping = step;
1870 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
1871 /* Coerce to a uintptr_t first to avoid potential gcc warning
1872 of coercing an 8 byte integer to a 4 byte pointer. */
1873 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
1874
1875 current_inferior = saved_inferior;
1876 if (errno)
1877 {
1878 /* ESRCH from ptrace either means that the thread was already
1879 running (an error) or that it is gone (a race condition). If
1880 it's gone, we will get a notification the next time we wait,
1881 so we can ignore the error. We could differentiate these
1882 two, but it's tricky without waiting; the thread still exists
1883 as a zombie, so sending it signal 0 would succeed. So just
1884 ignore ESRCH. */
1885 if (errno == ESRCH)
1886 return;
1887
1888 perror_with_name ("ptrace");
1889 }
1890 }
1891
1892 struct thread_resume_array
1893 {
1894 struct thread_resume *resume;
1895 size_t n;
1896 };
1897
1898 /* This function is called once per thread. We look up the thread
1899 in RESUME_PTR, and mark the thread with a pointer to the appropriate
1900 resume request.
1901
1902 This algorithm is O(threads * resume elements), but resume elements
1903 is small (and will remain small at least until GDB supports thread
1904 suspension). */
1905 static int
1906 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
1907 {
1908 struct lwp_info *lwp;
1909 struct thread_info *thread;
1910 int ndx;
1911 struct thread_resume_array *r;
1912
1913 thread = (struct thread_info *) entry;
1914 lwp = get_thread_lwp (thread);
1915 r = arg;
1916
1917 for (ndx = 0; ndx < r->n; ndx++)
1918 {
1919 ptid_t ptid = r->resume[ndx].thread;
1920 if (ptid_equal (ptid, minus_one_ptid)
1921 || ptid_equal (ptid, entry->id)
1922 || (ptid_is_pid (ptid)
1923 && (ptid_get_pid (ptid) == pid_of (lwp)))
1924 || (ptid_get_lwp (ptid) == -1
1925 && (ptid_get_pid (ptid) == pid_of (lwp))))
1926 {
1927 lwp->resume = &r->resume[ndx];
1928 return 0;
1929 }
1930 }
1931
1932 /* No resume action for this thread. */
1933 lwp->resume = NULL;
1934
1935 return 0;
1936 }
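/* As an illustration of the matching above (example packet, not taken
   from this file): a GDB "vCont;s:p5.5;c" request would typically
   arrive as two thread_resume entries - one with kind == resume_step
   whose ptid names LWP 5 of process 5, and a catch-all entry with
   ptid == minus_one_ptid and kind == resume_continue that matches
   every remaining thread via the wildcard tests above. */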
1937
1938
1939 /* Set *FLAG_P if this lwp has an interesting status pending. */
1940 static int
1941 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1942 {
1943 struct lwp_info *lwp = (struct lwp_info *) entry;
1944
1945 /* LWPs which will not be resumed are not interesting, because
1946 we might not wait for them next time through linux_wait. */
1947 if (lwp->resume == NULL)
1948 return 0;
1949
1950 /* If this thread has a removed breakpoint, we won't have any
1951 events to report later, so check now. check_removed_breakpoint
1952 may clear status_pending_p. We avoid calling check_removed_breakpoint
1953 for any thread that we are not otherwise going to resume - this
1954 lets us preserve stopped status when two threads hit a breakpoint.
1955 GDB removes the breakpoint to single-step a particular thread
1956 past it, then re-inserts it and resumes all threads. We want
1957 to report the second thread without resuming it in the interim. */
1958 if (lwp->status_pending_p)
1959 check_removed_breakpoint (lwp);
1960
1961 if (lwp->status_pending_p)
1962 * (int *) flag_p = 1;
1963
1964 return 0;
1965 }
1966
1967 /* This function is called once per thread. We check the thread's resume
1968 request, which will tell us whether to resume, step, or leave the thread
1969 stopped; and what signal, if any, it should be sent.
1970
1971 For threads which we aren't explicitly told otherwise, we preserve
1972 the stepping flag; this is used for stepping over gdbserver-placed
1973 breakpoints.
1974
1975 If the pending flag was set for any thread, we queue any needed
1976 signals, since we won't actually resume. We already have a pending
1977 event to report, so we don't need to preserve any step requests;
1978 they should be re-issued if necessary. */
1979
1980 static int
1981 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
1982 {
1983 struct lwp_info *lwp;
1984 struct thread_info *thread;
1985 int step;
1986 int pending_flag = * (int *) arg;
1987
1988 thread = (struct thread_info *) entry;
1989 lwp = get_thread_lwp (thread);
1990
1991 if (lwp->resume == NULL)
1992 return 0;
1993
1994 if (lwp->resume->kind == resume_stop)
1995 {
1996 if (debug_threads)
1997 fprintf (stderr, "suspending LWP %ld\n", lwpid_of (lwp));
1998
1999 if (!lwp->stopped)
2000 {
2001 if (debug_threads)
2002 fprintf (stderr, "running -> suspending LWP %ld\n", lwpid_of (lwp));
2003
2004 lwp->suspended = 1;
2005 send_sigstop (&lwp->head);
2006 }
2007 else
2008 {
2009 if (debug_threads)
2010 {
2011 if (lwp->suspended)
2012 fprintf (stderr, "already stopped/suspended LWP %ld\n",
2013 lwpid_of (lwp));
2014 else
2015 fprintf (stderr, "already stopped/not suspended LWP %ld\n",
2016 lwpid_of (lwp));
2017 }
2018
2019 /* Make sure we leave the LWP suspended, so we don't try to
2020 resume it without GDB telling us to. FIXME: The LWP may
2021 have been stopped in an internal event that was not meant
2022 to be notified back to GDB (e.g., gdbserver breakpoint),
2023 so we should be reporting a stop event in that case
2024 too. */
2025 lwp->suspended = 1;
2026 }
2027
2028 /* For stop requests, we're done. */
2029 lwp->resume = NULL;
2030 return 0;
2031 }
2032 else
2033 lwp->suspended = 0;
2034
2035 /* If this thread, which is about to be resumed, has a pending status,
2036 then don't resume any threads - we can just report the pending
2037 status. Make sure to queue any signals that would otherwise be
2038 sent. In all-stop mode, we make this decision based on whether *any*
2039 thread has a pending status. */
2040 if (non_stop)
2041 resume_status_pending_p (&lwp->head, &pending_flag);
2042
2043 if (!pending_flag)
2044 {
2045 if (debug_threads)
2046 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
2047
2048 if (ptid_equal (lwp->resume->thread, minus_one_ptid)
2049 && lwp->stepping
2050 && lwp->pending_is_breakpoint)
2051 step = 1;
2052 else
2053 step = (lwp->resume->kind == resume_step);
2054
2055 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
2056 }
2057 else
2058 {
2059 if (debug_threads)
2060 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
2061
2062 /* If we have a new signal, enqueue the signal. */
2063 if (lwp->resume->sig != 0)
2064 {
2065 struct pending_signals *p_sig;
2066 p_sig = xmalloc (sizeof (*p_sig));
2067 p_sig->prev = lwp->pending_signals;
2068 p_sig->signal = lwp->resume->sig;
2069 memset (&p_sig->info, 0, sizeof (siginfo_t));
2070
2071 /* If this is the same signal we were previously stopped by,
2072 make sure to queue its siginfo. We can ignore the return
2073 value of ptrace; if it fails, we'll skip
2074 PTRACE_SETSIGINFO. */
2075 if (WIFSTOPPED (lwp->last_status)
2076 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
2077 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
2078
2079 lwp->pending_signals = p_sig;
2080 }
2081 }
2082
2083 lwp->resume = NULL;
2084 return 0;
2085 }
2086
2087 static void
2088 linux_resume (struct thread_resume *resume_info, size_t n)
2089 {
2090 int pending_flag;
2091 struct thread_resume_array array = { resume_info, n };
2092
2093 find_inferior (&all_threads, linux_set_resume_request, &array);
2094
2095 /* If there is a thread which would otherwise be resumed, which
2096 has a pending status, then don't resume any threads - we can just
2097 report the pending status. Make sure to queue any signals
2098 that would otherwise be sent. In non-stop mode, we'll apply this
2099 logic to each thread individually. */
2100 pending_flag = 0;
2101 if (!non_stop)
2102 find_inferior (&all_lwps, resume_status_pending_p, &pending_flag);
2103
2104 if (debug_threads)
2105 {
2106 if (pending_flag)
2107 fprintf (stderr, "Not resuming, pending status\n");
2108 else
2109 fprintf (stderr, "Resuming, no pending status\n");
2110 }
2111
2112 find_inferior (&all_threads, linux_resume_one_thread, &pending_flag);
2113 }
2114
2115 #ifdef HAVE_LINUX_USRREGS
2116
2117 int
2118 register_addr (int regnum)
2119 {
2120 int addr;
2121
2122 if (regnum < 0 || regnum >= the_low_target.num_regs)
2123 error ("Invalid register number %d.", regnum);
2124
2125 addr = the_low_target.regmap[regnum];
2126
2127 return addr;
2128 }
2129
2130 /* Fetch one register. */
2131 static void
2132 fetch_register (struct regcache *regcache, int regno)
2133 {
2134 CORE_ADDR regaddr;
2135 int i, size;
2136 char *buf;
2137 int pid;
2138
2139 if (regno >= the_low_target.num_regs)
2140 return;
2141 if ((*the_low_target.cannot_fetch_register) (regno))
2142 return;
2143
2144 regaddr = register_addr (regno);
2145 if (regaddr == -1)
2146 return;
2147
2148 pid = lwpid_of (get_thread_lwp (current_inferior));
2149 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2150 & - sizeof (PTRACE_XFER_TYPE));
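/* Example of the rounding above: with 8-byte PTRACE_XFER_TYPE words, a
   hypothetical 10-byte register rounds up to a 16-byte buffer, which
   the loop below fills with two PTRACE_PEEKUSER transfers. */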
2151 buf = alloca (size);
2152 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2153 {
2154 errno = 0;
2155 *(PTRACE_XFER_TYPE *) (buf + i) =
2156 ptrace (PTRACE_PEEKUSER, pid,
2157 /* Coerce to a uintptr_t first to avoid potential gcc warning
2158 of coercing an 8 byte integer to a 4 byte pointer. */
2159 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
2160 regaddr += sizeof (PTRACE_XFER_TYPE);
2161 if (errno != 0)
2162 {
2163 /* Warning, not error, in case we are attached; sometimes the
2164 kernel doesn't let us at the registers. */
2165 char *err = strerror (errno);
2166 char *msg = alloca (strlen (err) + 128);
2167 sprintf (msg, "reading register %d: %s", regno, err);
2168 error ("%s", msg);
2169 goto error_exit;
2170 }
2171 }
2172
2173 if (the_low_target.supply_ptrace_register)
2174 the_low_target.supply_ptrace_register (regcache, regno, buf);
2175 else
2176 supply_register (regcache, regno, buf);
2177
2178 error_exit:;
2179 }
2180
2181 /* Fetch all registers, or just one, from the child process. */
2182 static void
2183 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
2184 {
2185 if (regno == -1)
2186 for (regno = 0; regno < the_low_target.num_regs; regno++)
2187 fetch_register (regcache, regno);
2188 else
2189 fetch_register (regcache, regno);
2190 }
2191
2192 /* Store our register values back into the inferior.
2193 If REGNO is -1, do this for all registers.
2194 Otherwise, REGNO specifies which register (so we can save time). */
2195 static void
2196 usr_store_inferior_registers (struct regcache *regcache, int regno)
2197 {
2198 CORE_ADDR regaddr;
2199 int i, size;
2200 char *buf;
2201 int pid;
2202
2203 if (regno >= 0)
2204 {
2205 if (regno >= the_low_target.num_regs)
2206 return;
2207
2208 if ((*the_low_target.cannot_store_register) (regno) == 1)
2209 return;
2210
2211 regaddr = register_addr (regno);
2212 if (regaddr == -1)
2213 return;
2214 errno = 0;
2215 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2216 & - sizeof (PTRACE_XFER_TYPE);
2217 buf = alloca (size);
2218 memset (buf, 0, size);
2219
2220 if (the_low_target.collect_ptrace_register)
2221 the_low_target.collect_ptrace_register (regcache, regno, buf);
2222 else
2223 collect_register (regcache, regno, buf);
2224
2225 pid = lwpid_of (get_thread_lwp (current_inferior));
2226 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2227 {
2228 errno = 0;
2229 ptrace (PTRACE_POKEUSER, pid,
2230 /* Coerce to a uintptr_t first to avoid potential gcc warning
2231 about coercing an 8 byte integer to a 4 byte pointer. */
2232 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
2233 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
2234 if (errno != 0)
2235 {
2236 /* At this point, ESRCH should mean the process is
2237 already gone, in which case we simply ignore attempts
2238 to change its registers. See also the related
2239 comment in linux_resume_one_lwp. */
2240 if (errno == ESRCH)
2241 return;
2242
2243 if ((*the_low_target.cannot_store_register) (regno) == 0)
2244 {
2245 char *err = strerror (errno);
2246 char *msg = alloca (strlen (err) + 128);
2247 sprintf (msg, "writing register %d: %s",
2248 regno, err);
2249 error ("%s", msg);
2250 return;
2251 }
2252 }
2253 regaddr += sizeof (PTRACE_XFER_TYPE);
2254 }
2255 }
2256 else
2257 for (regno = 0; regno < the_low_target.num_regs; regno++)
2258 usr_store_inferior_registers (regcache, regno);
2259 }
2260 #endif /* HAVE_LINUX_USRREGS */
2261
2262
2263
2264 #ifdef HAVE_LINUX_REGSETS
2265
2266 static int
2267 regsets_fetch_inferior_registers (struct regcache *regcache)
2268 {
2269 struct regset_info *regset;
2270 int saw_general_regs = 0;
2271 int pid;
2272
2273 regset = target_regsets;
2274
2275 pid = lwpid_of (get_thread_lwp (current_inferior));
2276 while (regset->size >= 0)
2277 {
2278 void *buf;
2279 int res;
2280
2281 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2282 {
2283 regset ++;
2284 continue;
2285 }
2286
2287 buf = xmalloc (regset->size);
2288 #ifndef __sparc__
2289 res = ptrace (regset->get_request, pid, 0, buf);
2290 #else
2291 res = ptrace (regset->get_request, pid, buf, 0);
2292 #endif
2293 if (res < 0)
2294 {
2295 if (errno == EIO)
2296 {
2297 /* If we get EIO on a regset, do not try it again for
2298 this process. */
2299 disabled_regsets[regset - target_regsets] = 1;
2300 free (buf);
2301 continue;
2302 }
2303 else
2304 {
2305 char s[256];
2306 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
2307 pid);
2308 perror (s);
2309 }
2310 }
2311 else if (regset->type == GENERAL_REGS)
2312 saw_general_regs = 1;
2313 regset->store_function (regcache, buf);
2314 regset ++;
2315 free (buf);
2316 }
2317 if (saw_general_regs)
2318 return 0;
2319 else
2320 return 1;
2321 }
2322
2323 static int
2324 regsets_store_inferior_registers (struct regcache *regcache)
2325 {
2326 struct regset_info *regset;
2327 int saw_general_regs = 0;
2328 int pid;
2329
2330 regset = target_regsets;
2331
2332 pid = lwpid_of (get_thread_lwp (current_inferior));
2333 while (regset->size >= 0)
2334 {
2335 void *buf;
2336 int res;
2337
2338 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2339 {
2340 regset ++;
2341 continue;
2342 }
2343
2344 buf = xmalloc (regset->size);
2345
2346 /* First fill the buffer with the current register set contents,
2347 in case there are any items in the kernel's regset that are
2348 not in gdbserver's regcache. */
2349 #ifndef __sparc__
2350 res = ptrace (regset->get_request, pid, 0, buf);
2351 #else
2352 res = ptrace (regset->get_request, pid, buf, 0);
2353 #endif
2354
2355 if (res == 0)
2356 {
2357 /* Then overlay our cached registers on that. */
2358 regset->fill_function (regcache, buf);
2359
2360 /* Only now do we write the register set. */
2361 #ifndef __sparc__
2362 res = ptrace (regset->set_request, pid, 0, buf);
2363 #else
2364 res = ptrace (regset->set_request, pid, buf, 0);
2365 #endif
2366 }
2367
2368 if (res < 0)
2369 {
2370 if (errno == EIO)
2371 {
2372 /* If we get EIO on a regset, do not try it again for
2373 this process. */
2374 disabled_regsets[regset - target_regsets] = 1;
2375 free (buf);
2376 continue;
2377 }
2378 else if (errno == ESRCH)
2379 {
2380 /* At this point, ESRCH should mean the process is
2381 already gone, in which case we simply ignore attempts
2382 to change its registers. See also the related
2383 comment in linux_resume_one_lwp. */
2384 free (buf);
2385 return 0;
2386 }
2387 else
2388 {
2389 perror ("Warning: ptrace(regsets_store_inferior_registers)");
2390 }
2391 }
2392 else if (regset->type == GENERAL_REGS)
2393 saw_general_regs = 1;
2394 regset ++;
2395 free (buf);
2396 }
2397 if (saw_general_regs)
2398 return 0;
2399 else
2400 return 1;
2402 }
2403
2404 #endif /* HAVE_LINUX_REGSETS */
2405
2406
2407 void
2408 linux_fetch_registers (struct regcache *regcache, int regno)
2409 {
2410 #ifdef HAVE_LINUX_REGSETS
2411 if (regsets_fetch_inferior_registers (regcache) == 0)
2412 return;
2413 #endif
2414 #ifdef HAVE_LINUX_USRREGS
2415 usr_fetch_inferior_registers (regcache, regno);
2416 #endif
2417 }
2418
2419 void
2420 linux_store_registers (struct regcache *regcache, int regno)
2421 {
2422 #ifdef HAVE_LINUX_REGSETS
2423 if (regsets_store_inferior_registers (regcache) == 0)
2424 return;
2425 #endif
2426 #ifdef HAVE_LINUX_USRREGS
2427 usr_store_inferior_registers (regcache, regno);
2428 #endif
2429 }
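/* Note the access order above: the regset (PTRACE_GETREGS-style)
   method is tried first when available, and the slower word-at-a-time
   PTRACE_PEEKUSER/POKEUSER path is only used as a fallback - for
   instance when no regset covered the general registers, or when a
   regset was disabled after returning EIO. */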
2430
2431
2432 /* Copy LEN bytes from inferior's memory starting at MEMADDR
2433 to debugger memory starting at MYADDR. */
2434
2435 static int
2436 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
2437 {
2438 register int i;
2439 /* Round starting address down to longword boundary. */
2440 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2441 /* Round ending address up; get number of longwords that makes. */
2442 register int count
2443 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
2444 / sizeof (PTRACE_XFER_TYPE);
2445 /* Allocate buffer of that many longwords. */
2446 register PTRACE_XFER_TYPE *buffer
2447 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2448 int fd;
2449 char filename[64];
2450 int pid = lwpid_of (get_thread_lwp (current_inferior));
2451
2452 /* Try using /proc. Don't bother for one word. */
2453 if (len >= 3 * sizeof (long))
2454 {
2455 /* We could keep this file open and cache it - possibly one per
2456 thread. That requires some juggling, but is even faster. */
2457 sprintf (filename, "/proc/%d/mem", pid);
2458 fd = open (filename, O_RDONLY | O_LARGEFILE);
2459 if (fd == -1)
2460 goto no_proc;
2461
2462 /* If pread64 is available, use it. It's faster if the kernel
2463 supports it (only one syscall), and it's 64-bit safe even on
2464 32-bit platforms (for instance, SPARC debugging a SPARC64
2465 application). */
2466 #ifdef HAVE_PREAD64
2467 if (pread64 (fd, myaddr, len, memaddr) != len)
2468 #else
2469 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
2470 #endif
2471 {
2472 close (fd);
2473 goto no_proc;
2474 }
2475
2476 close (fd);
2477 return 0;
2478 }
2479
2480 no_proc:
2481 /* Read all the longwords */
2482 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2483 {
2484 errno = 0;
2485 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
2486 about coercing an 8 byte integer to a 4 byte pointer. */
2487 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
2488 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
2489 if (errno)
2490 return errno;
2491 }
2492
2493 /* Copy appropriate bytes out of the buffer. */
2494 memcpy (myaddr,
2495 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
2496 len);
2497
2498 return 0;
2499 }
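/* Worked example for the ptrace fallback above, assuming 4-byte
   PTRACE_XFER_TYPE words: a request for 5 bytes at 0x1003 rounds ADDR
   down to 0x1000, computes COUNT = ((0x1008 - 0x1000) + 3) / 4 = 2
   words, peeks 0x1000 and 0x1004, and then copies the 5 requested
   bytes starting at offset 3 within the temporary buffer. */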
2500
2501 /* Copy LEN bytes of data from debugger memory at MYADDR
2502 to inferior's memory at MEMADDR.
2503 On failure (cannot write the inferior)
2504 returns the value of errno. */
2505
2506 static int
2507 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
2508 {
2509 register int i;
2510 /* Round starting address down to longword boundary. */
2511 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2512 /* Round ending address up; get number of longwords that makes. */
2513 register int count
2514 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
2515 /* Allocate buffer of that many longwords. */
2516 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2517 int pid = lwpid_of (get_thread_lwp (current_inferior));
2518
2519 if (debug_threads)
2520 {
2521 /* Dump up to four bytes. */
2522 unsigned int val = * (unsigned int *) myaddr;
2523 if (len == 1)
2524 val = val & 0xff;
2525 else if (len == 2)
2526 val = val & 0xffff;
2527 else if (len == 3)
2528 val = val & 0xffffff;
2529 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
2530 val, (long)memaddr);
2531 }
2532
2533 /* Fill start and end extra bytes of buffer with existing memory data. */
2534
2535 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
2536 about coercing an 8 byte integer to a 4 byte pointer. */
2537 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
2538 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
2539
2540 if (count > 1)
2541 {
2542 buffer[count - 1]
2543 = ptrace (PTRACE_PEEKTEXT, pid,
2544 /* Coerce to a uintptr_t first to avoid potential gcc warning
2545 about coercing an 8 byte integer to a 4 byte pointer. */
2546 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
2547 * sizeof (PTRACE_XFER_TYPE)),
2548 0);
2549 }
2550
2551 /* Copy data to be written over corresponding part of buffer */
2552
2553 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
2554
2555 /* Write the entire buffer. */
2556
2557 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2558 {
2559 errno = 0;
2560 ptrace (PTRACE_POKETEXT, pid,
2561 /* Coerce to a uintptr_t first to avoid potential gcc warning
2562 about coercing an 8 byte integer to a 4 byte pointer. */
2563 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
2564 (PTRACE_ARG4_TYPE) buffer[i]);
2565 if (errno)
2566 return errno;
2567 }
2568
2569 return 0;
2570 }
2571
2572 static int linux_supports_tracefork_flag;
2573
2574 /* Helper functions for linux_test_for_tracefork, called via clone (). */
2575
2576 static int
2577 linux_tracefork_grandchild (void *arg)
2578 {
2579 _exit (0);
2580 }
2581
2582 #define STACK_SIZE 4096
2583
2584 static int
2585 linux_tracefork_child (void *arg)
2586 {
2587 ptrace (PTRACE_TRACEME, 0, 0, 0);
2588 kill (getpid (), SIGSTOP);
2589 #ifdef __ia64__
2590 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
2591 CLONE_VM | SIGCHLD, NULL);
2592 #else
2593 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
2594 CLONE_VM | SIGCHLD, NULL);
2595 #endif
2596 _exit (0);
2597 }
2598
2599 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
2600 sure that we can enable the option, and that it had the desired
2601 effect. */
2602
2603 static void
2604 linux_test_for_tracefork (void)
2605 {
2606 int child_pid, ret, status;
2607 long second_pid;
2608 char *stack = xmalloc (STACK_SIZE * 4);
2609
2610 linux_supports_tracefork_flag = 0;
2611
2612 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
2613 #ifdef __ia64__
2614 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
2615 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
2616 #else
2617 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
2618 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
2619 #endif
2620 if (child_pid == -1)
2621 perror_with_name ("clone");
2622
2623 ret = my_waitpid (child_pid, &status, 0);
2624 if (ret == -1)
2625 perror_with_name ("waitpid");
2626 else if (ret != child_pid)
2627 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
2628 if (! WIFSTOPPED (status))
2629 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
2630
2631 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
2632 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
2633 if (ret != 0)
2634 {
2635 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
2636 if (ret != 0)
2637 {
2638 warning ("linux_test_for_tracefork: failed to kill child");
2639 return;
2640 }
2641
2642 ret = my_waitpid (child_pid, &status, 0);
2643 if (ret != child_pid)
2644 warning ("linux_test_for_tracefork: failed to wait for killed child");
2645 else if (!WIFSIGNALED (status))
2646 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
2647 "killed child", status);
2648
2649 return;
2650 }
2651
2652 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
2653 if (ret != 0)
2654 warning ("linux_test_for_tracefork: failed to resume child");
2655
2656 ret = my_waitpid (child_pid, &status, 0);
2657
2658 if (ret == child_pid && WIFSTOPPED (status)
2659 && status >> 16 == PTRACE_EVENT_FORK)
2660 {
2661 second_pid = 0;
2662 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
2663 if (ret == 0 && second_pid != 0)
2664 {
2665 int second_status;
2666
2667 linux_supports_tracefork_flag = 1;
2668 my_waitpid (second_pid, &second_status, 0);
2669 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
2670 if (ret != 0)
2671 warning ("linux_test_for_tracefork: failed to kill second child");
2672 my_waitpid (second_pid, &status, 0);
2673 }
2674 }
2675 else
2676 warning ("linux_test_for_tracefork: unexpected result from waitpid "
2677 "(%d, status 0x%x)", ret, status);
2678
2679 do
2680 {
2681 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
2682 if (ret != 0)
2683 warning ("linux_test_for_tracefork: failed to kill child");
2684 my_waitpid (child_pid, &status, 0);
2685 }
2686 while (WIFSTOPPED (status));
2687
2688 free (stack);
2689 }
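/* On kernels that support PTRACE_O_TRACEFORK, the fork event above is
   reported as a SIGTRAP stop whose wait status carries the event in
   its high bits - conceptually W_STOPCODE (SIGTRAP)
   | (PTRACE_EVENT_FORK << 16) - which is why the test checks
   "status >> 16 == PTRACE_EVENT_FORK" and then fetches the new
   child's pid with PTRACE_GETEVENTMSG. */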
2690
2691
2692 static void
2693 linux_look_up_symbols (void)
2694 {
2695 #ifdef USE_THREAD_DB
2696 struct process_info *proc = current_process ();
2697
2698 if (proc->private->thread_db != NULL)
2699 return;
2700
2701 thread_db_init (!linux_supports_tracefork_flag);
2702 #endif
2703 }
2704
2705 static void
2706 linux_request_interrupt (void)
2707 {
2708 extern unsigned long signal_pid;
2709
2710 if (!ptid_equal (cont_thread, null_ptid)
2711 && !ptid_equal (cont_thread, minus_one_ptid))
2712 {
2713 struct lwp_info *lwp;
2714 int lwpid;
2715
2716 lwp = get_thread_lwp (current_inferior);
2717 lwpid = lwpid_of (lwp);
2718 kill_lwp (lwpid, SIGINT);
2719 }
2720 else
2721 kill_lwp (signal_pid, SIGINT);
2722 }
2723
2724 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
2725 to debugger memory starting at MYADDR. */
2726
2727 static int
2728 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
2729 {
2730 char filename[PATH_MAX];
2731 int fd, n;
2732 int pid = lwpid_of (get_thread_lwp (current_inferior));
2733
2734 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
2735
2736 fd = open (filename, O_RDONLY);
2737 if (fd < 0)
2738 return -1;
2739
2740 if (offset != (CORE_ADDR) 0
2741 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
2742 n = -1;
2743 else
2744 n = read (fd, myaddr, len);
2745
2746 close (fd);
2747
2748 return n;
2749 }
2750
2751 /* These breakpoint and watchpoint related wrapper functions simply
2752 pass on the function call if the target has registered a
2753 corresponding function. */
2754
2755 static int
2756 linux_insert_point (char type, CORE_ADDR addr, int len)
2757 {
2758 if (the_low_target.insert_point != NULL)
2759 return the_low_target.insert_point (type, addr, len);
2760 else
2761 /* Unsupported (see target.h). */
2762 return 1;
2763 }
2764
2765 static int
2766 linux_remove_point (char type, CORE_ADDR addr, int len)
2767 {
2768 if (the_low_target.remove_point != NULL)
2769 return the_low_target.remove_point (type, addr, len);
2770 else
2771 /* Unsupported (see target.h). */
2772 return 1;
2773 }
2774
2775 static int
2776 linux_stopped_by_watchpoint (void)
2777 {
2778 if (the_low_target.stopped_by_watchpoint != NULL)
2779 return the_low_target.stopped_by_watchpoint ();
2780 else
2781 return 0;
2782 }
2783
2784 static CORE_ADDR
2785 linux_stopped_data_address (void)
2786 {
2787 if (the_low_target.stopped_data_address != NULL)
2788 return the_low_target.stopped_data_address ();
2789 else
2790 return 0;
2791 }
2792
2793 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
2794 #if defined(__mcoldfire__)
2795 /* These should really be defined in the kernel's ptrace.h header. */
2796 #define PT_TEXT_ADDR 49*4
2797 #define PT_DATA_ADDR 50*4
2798 #define PT_TEXT_END_ADDR 51*4
2799 #endif
2800
2801 /* Under uClinux, programs are loaded at non-zero offsets, which we need
2802 to tell gdb about. */
2803
2804 static int
2805 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
2806 {
2807 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
2808 unsigned long text, text_end, data;
2809 int pid = lwpid_of (get_thread_lwp (current_inferior));
2810
2811 errno = 0;
2812
2813 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
2814 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
2815 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
2816
2817 if (errno == 0)
2818 {
2819 /* Both text and data offsets produced at compile-time (and so
2820 used by gdb) are relative to the beginning of the program,
2821 with the data segment immediately following the text segment.
2822 However, the actual runtime layout in memory may put the data
2823 somewhere else, so when we send gdb a data base-address, we
2824 use the real data base address and subtract the compile-time
2825 data base-address from it (which is just the length of the
2826 text segment). BSS immediately follows data in both
2827 cases. */
2828 *text_p = text;
2829 *data_p = data - (text_end - text);
2830
2831 return 1;
2832 }
2833 #endif
2834 return 0;
2835 }
2836 #endif
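/* Example of the computation above, using made-up addresses: if the
   text segment was loaded at 0x10000000 with text_end at 0x10008000
   and the data segment at 0x20000000, then *text_p is 0x10000000 and
   *data_p is 0x20000000 - 0x8000 = 0x1fff8000, so that adding a
   compile-time data offset (which includes the text size) back to
   *data_p yields the real runtime data address. */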
2837
2838 static int
2839 compare_ints (const void *xa, const void *xb)
2840 {
2841 int a = *(const int *)xa;
2842 int b = *(const int *)xb;
2843
2844 return a - b;
2845 }
2846
2847 static int *
2848 unique (int *b, int *e)
2849 {
2850 int *d = b;
2851 while (++b != e)
2852 if (*d != *b)
2853 *++d = *b;
2854 return ++d;
2855 }
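/* For example, applied to the sorted array {0, 0, 1, 3, 3}, unique
   compacts it in place to {0, 1, 3, ...} and returns a pointer just
   past the third element - the same idiom as C++ std::unique, used
   below on the sorted list of core numbers. */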
2856
2857 /* Given PID, iterates over all threads in that process.
2858
2859 Information about each thread, in a format suitable for qXfer:osdata:thread
2860 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
2861 initialized, and the caller is responsible for finishing and appending '\0'
2862 to it.
2863
2864 The list of cores that threads are running on is assigned to *CORES, if it
2865 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
2866 should free *CORES. */
2867
2868 static void
2869 list_threads (int pid, struct buffer *buffer, char **cores)
2870 {
2871 int count = 0;
2872 int allocated = 10;
2873 int *core_numbers = xmalloc (sizeof (int) * allocated);
2874 char pathname[128];
2875 DIR *dir;
2876 struct dirent *dp;
2877 struct stat statbuf;
2878
2879 sprintf (pathname, "/proc/%d/task", pid);
2880 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
2881 {
2882 dir = opendir (pathname);
2883 if (!dir)
2884 {
2885 free (core_numbers);
2886 return;
2887 }
2888
2889 while ((dp = readdir (dir)) != NULL)
2890 {
2891 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
2892
2893 if (lwp != 0)
2894 {
2895 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
2896
2897 if (core != -1)
2898 {
2899 char s[sizeof ("4294967295")];
2900 sprintf (s, "%u", core);
2901
2902 if (count == allocated)
2903 {
2904 allocated *= 2;
2905 core_numbers = realloc (core_numbers,
2906 sizeof (int) * allocated);
2907 }
2908 core_numbers[count++] = core;
2909 if (buffer)
2910 buffer_xml_printf (buffer,
2911 "<item>"
2912 "<column name=\"pid\">%d</column>"
2913 "<column name=\"tid\">%s</column>"
2914 "<column name=\"core\">%s</column>"
2915 "</item>", pid, dp->d_name, s);
2916 }
2917 else
2918 {
2919 if (buffer)
2920 buffer_xml_printf (buffer,
2921 "<item>"
2922 "<column name=\"pid\">%d</column>"
2923 "<column name=\"tid\">%s</column>"
2924 "</item>", pid, dp->d_name);
2925 }
2926 }
2927 }
2928 }
2929
2930 if (cores)
2931 {
2932 *cores = NULL;
2933 if (count > 0)
2934 {
2935 struct buffer buffer2;
2936 int *b;
2937 int *e;
2938 qsort (core_numbers, count, sizeof (int), compare_ints);
2939
2940 /* Remove duplicates. */
2941 b = core_numbers;
2942 e = unique (b, core_numbers + count);
2943
2944 buffer_init (&buffer2);
2945
2946 for (b = core_numbers; b != e; ++b)
2947 {
2948 char number[sizeof ("4294967295")];
2949 sprintf (number, "%u", *b);
2950 buffer_xml_printf (&buffer2, "%s%s",
2951 (b == core_numbers) ? "" : ",", number);
2952 }
2953 buffer_grow_str0 (&buffer2, "");
2954
2955 *cores = buffer_finish (&buffer2);
2956 }
2957 }
2958 free (core_numbers);
2959 }
2960
2961 static void
2962 show_process (int pid, const char *username, struct buffer *buffer)
2963 {
2964 char pathname[128];
2965 FILE *f;
2966 char cmd[MAXPATHLEN + 1];
2967
2968 sprintf (pathname, "/proc/%d/cmdline", pid);
2969
2970 if ((f = fopen (pathname, "r")) != NULL)
2971 {
2972 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
2973 if (len > 0)
2974 {
2975 char *cores = 0;
2976 int i;
2977 for (i = 0; i < len; i++)
2978 if (cmd[i] == '\0')
2979 cmd[i] = ' ';
2980 cmd[len] = '\0';
2981
2982 buffer_xml_printf (buffer,
2983 "<item>"
2984 "<column name=\"pid\">%d</column>"
2985 "<column name=\"user\">%s</column>"
2986 "<column name=\"command\">%s</column>",
2987 pid,
2988 username,
2989 cmd);
2990
2991 /* This only collects core numbers, and does not print threads. */
2992 list_threads (pid, NULL, &cores);
2993
2994 if (cores)
2995 {
2996 buffer_xml_printf (buffer,
2997 "<column name=\"cores\">%s</column>", cores);
2998 free (cores);
2999 }
3000
3001 buffer_xml_printf (buffer, "</item>");
3002 }
3003 fclose (f);
3004 }
3005 }
3006
3007 static int
3008 linux_qxfer_osdata (const char *annex,
3009 unsigned char *readbuf, unsigned const char *writebuf,
3010 CORE_ADDR offset, int len)
3011 {
3012 /* We make the process list snapshot when the object starts to be
3013 read. */
3014 static const char *buf;
3015 static long len_avail = -1;
3016 static struct buffer buffer;
3017 int processes = 0;
3018 int threads = 0;
3019
3020 DIR *dirp;
3021
3022 if (strcmp (annex, "processes") == 0)
3023 processes = 1;
3024 else if (strcmp (annex, "threads") == 0)
3025 threads = 1;
3026 else
3027 return 0;
3028
3029 if (!readbuf || writebuf)
3030 return 0;
3031
3032 if (offset == 0)
3033 {
3034 if (len_avail != -1 && len_avail != 0)
3035 buffer_free (&buffer);
3036 len_avail = 0;
3037 buf = NULL;
3038 buffer_init (&buffer);
3039 if (processes)
3040 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
3041 else if (threads)
3042 buffer_grow_str (&buffer, "<osdata type=\"threads\">");
3043
3044 dirp = opendir ("/proc");
3045 if (dirp)
3046 {
3047 struct dirent *dp;
3048 while ((dp = readdir (dirp)) != NULL)
3049 {
3050 struct stat statbuf;
3051 char procentry[sizeof ("/proc/4294967295")];
3052
3053 if (!isdigit (dp->d_name[0])
3054 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
3055 continue;
3056
3057 sprintf (procentry, "/proc/%s", dp->d_name);
3058 if (stat (procentry, &statbuf) == 0
3059 && S_ISDIR (statbuf.st_mode))
3060 {
3061 int pid = (int) strtoul (dp->d_name, NULL, 10);
3062
3063 if (processes)
3064 {
3065 struct passwd *entry = getpwuid (statbuf.st_uid);
3066 show_process (pid, entry ? entry->pw_name : "?", &buffer);
3067 }
3068 else if (threads)
3069 {
3070 list_threads (pid, &buffer, NULL);
3071 }
3072 }
3073 }
3074
3075 closedir (dirp);
3076 }
3077 buffer_grow_str0 (&buffer, "</osdata>\n");
3078 buf = buffer_finish (&buffer);
3079 len_avail = strlen (buf);
3080 }
3081
3082 if (offset >= len_avail)
3083 {
3084 /* Done. Get rid of the data. */
3085 buffer_free (&buffer);
3086 buf = NULL;
3087 len_avail = 0;
3088 return 0;
3089 }
3090
3091 if (len > len_avail - offset)
3092 len = len_avail - offset;
3093 memcpy (readbuf, buf + offset, len);
3094
3095 return len;
3096 }
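/* A sketch of what the "processes" reply built above can look like
   (values invented for illustration, whitespace added for clarity):

   <osdata type="processes">
     <item>
       <column name="pid">1234</column>
       <column name="user">joe</column>
       <column name="command">./a.out --verbose</column>
       <column name="cores">0,2</column>
     </item>
   </osdata>

   GDB fetches this object in pieces via qXfer:osdata:read, which is
   why the buffer is built once when OFFSET == 0 and then served out
   of the static BUF/LEN_AVAIL state on subsequent calls. */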
3097
3098 /* Convert a native/host siginfo object, into/from the siginfo in the
3099 layout of the inferior's architecture. */
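/* As the memcpy fallback below shows, DIRECTION == 1 means copy from
   INF_SIGINFO (inferior layout) into SIGINFO (native layout), and
   DIRECTION == 0 means the reverse. */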
3100
3101 static void
3102 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
3103 {
3104 int done = 0;
3105
3106 if (the_low_target.siginfo_fixup != NULL)
3107 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
3108
3109 /* If there was no callback, or the callback didn't do anything,
3110 then just do a straight memcpy. */
3111 if (!done)
3112 {
3113 if (direction == 1)
3114 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3115 else
3116 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3117 }
3118 }
3119
3120 static int
3121 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
3122 unsigned const char *writebuf, CORE_ADDR offset, int len)
3123 {
3124 int pid;
3125 struct siginfo siginfo;
3126 char inf_siginfo[sizeof (struct siginfo)];
3127
3128 if (current_inferior == NULL)
3129 return -1;
3130
3131 pid = lwpid_of (get_thread_lwp (current_inferior));
3132
3133 if (debug_threads)
3134 fprintf (stderr, "%s siginfo for lwp %d.\n",
3135 readbuf != NULL ? "Reading" : "Writing",
3136 pid);
3137
3138 if (offset > sizeof (siginfo))
3139 return -1;
3140
3141 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
3142 return -1;
3143
3144 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
3145 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3146 inferior with a 64-bit GDBSERVER should look the same as debugging it
3147 with a 32-bit GDBSERVER, we need to convert it. */
3148 siginfo_fixup (&siginfo, inf_siginfo, 0);
3149
3150 if (offset + len > sizeof (siginfo))
3151 len = sizeof (siginfo) - offset;
3152
3153 if (readbuf != NULL)
3154 memcpy (readbuf, inf_siginfo + offset, len);
3155 else
3156 {
3157 memcpy (inf_siginfo + offset, writebuf, len);
3158
3159 /* Convert back to ptrace layout before flushing it out. */
3160 siginfo_fixup (&siginfo, inf_siginfo, 1);
3161
3162 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
3163 return -1;
3164 }
3165
3166 return len;
3167 }
3168
3169 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
3170 it lets us notice when children change state; in all modes, it is
3171 the handler for the sigsuspend in my_waitpid. */
3172
3173 static void
3174 sigchld_handler (int signo)
3175 {
3176 int old_errno = errno;
3177
3178 if (debug_threads)
3179 /* fprintf is not async-signal-safe, so call write directly. */
3180 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
3181
3182 if (target_is_async_p ())
3183 async_file_mark (); /* trigger a linux_wait */
3184
3185 errno = old_errno;
3186 }
3187
3188 static int
3189 linux_supports_non_stop (void)
3190 {
3191 return 1;
3192 }
3193
3194 static int
3195 linux_async (int enable)
3196 {
3197 int previous = (linux_event_pipe[0] != -1);
3198
3199 if (previous != enable)
3200 {
3201 sigset_t mask;
3202 sigemptyset (&mask);
3203 sigaddset (&mask, SIGCHLD);
3204
3205 sigprocmask (SIG_BLOCK, &mask, NULL);
3206
3207 if (enable)
3208 {
3209 if (pipe (linux_event_pipe) == -1)
3210 fatal ("creating event pipe failed.");
3211
3212 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
3213 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
3214
3215 /* Register the event loop handler. */
3216 add_file_handler (linux_event_pipe[0],
3217 handle_target_event, NULL);
3218
3219 /* Always trigger a linux_wait. */
3220 async_file_mark ();
3221 }
3222 else
3223 {
3224 delete_file_handler (linux_event_pipe[0]);
3225
3226 close (linux_event_pipe[0]);
3227 close (linux_event_pipe[1]);
3228 linux_event_pipe[0] = -1;
3229 linux_event_pipe[1] = -1;
3230 }
3231
3232 sigprocmask (SIG_UNBLOCK, &mask, NULL);
3233 }
3234
3235 return previous;
3236 }
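/* The async machinery above uses a self-pipe: enabling async creates
   linux_event_pipe and registers its read end with the event loop, so
   that async_file_mark - which presumably writes a byte to the pipe's
   write end, e.g. from the SIGCHLD handler above - wakes the event
   loop and ultimately triggers a linux_wait. SIGCHLD is blocked
   across the switch, presumably so the handler cannot run against a
   half-created or half-closed pipe. */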
3237
3238 static int
3239 linux_start_non_stop (int nonstop)
3240 {
3241 /* Register or unregister from event-loop accordingly. */
3242 linux_async (nonstop);
3243 return 0;
3244 }
3245
3246 static int
3247 linux_supports_multi_process (void)
3248 {
3249 return 1;
3250 }
3251
3252
3253 /* Enumerate spufs IDs for process PID. */
3254 static int
3255 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
3256 {
3257 int pos = 0;
3258 int written = 0;
3259 char path[128];
3260 DIR *dir;
3261 struct dirent *entry;
3262
3263 sprintf (path, "/proc/%ld/fd", pid);
3264 dir = opendir (path);
3265 if (!dir)
3266 return -1;
3267
3268 rewinddir (dir);
3269 while ((entry = readdir (dir)) != NULL)
3270 {
3271 struct stat st;
3272 struct statfs stfs;
3273 int fd;
3274
3275 fd = atoi (entry->d_name);
3276 if (!fd)
3277 continue;
3278
3279 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
3280 if (stat (path, &st) != 0)
3281 continue;
3282 if (!S_ISDIR (st.st_mode))
3283 continue;
3284
3285 if (statfs (path, &stfs) != 0)
3286 continue;
3287 if (stfs.f_type != SPUFS_MAGIC)
3288 continue;
3289
3290 if (pos >= offset && pos + 4 <= offset + len)
3291 {
3292 *(unsigned int *)(buf + pos - offset) = fd;
3293 written += 4;
3294 }
3295 pos += 4;
3296 }
3297
3298 closedir (dir);
3299 return written;
3300 }
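/* Example of the OFFSET/LEN windowing above: each matching SPU
   context fd occupies 4 bytes of the enumerated object, so a request
   with offset 4 and len 8 returns at most the second and third
   matching fds (8 bytes written), regardless of how many spufs
   contexts the process has open. */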
3301
3302 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
3303 object type, using the /proc file system. */
3304 static int
3305 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
3306 unsigned const char *writebuf,
3307 CORE_ADDR offset, int len)
3308 {
3309 long pid = lwpid_of (get_thread_lwp (current_inferior));
3310 char buf[128];
3311 int fd = 0;
3312 int ret = 0;
3313
3314 if (!writebuf && !readbuf)
3315 return -1;
3316
3317 if (!*annex)
3318 {
3319 if (!readbuf)
3320 return -1;
3321 else
3322 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
3323 }
3324
3325 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
3326 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
3327 if (fd <= 0)
3328 return -1;
3329
3330 if (offset != 0
3331 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3332 {
3333 close (fd);
3334 return 0;
3335 }
3336
3337 if (writebuf)
3338 ret = write (fd, writebuf, (size_t) len);
3339 else
3340 ret = read (fd, readbuf, (size_t) len);
3341
3342 close (fd);
3343 return ret;
3344 }
3345
3346 static int
3347 linux_core_of_thread (ptid_t ptid)
3348 {
3349 char filename[sizeof ("/proc//task//stat")
3350 + 2 * 20 /* decimal digits for two numbers of up to 64 bits each */
3351 + 1];
3352 FILE *f;
3353 char *content = NULL;
3354 char *p;
3355 char *ts = 0;
3356 int content_read = 0;
3357 int i;
3358 int core;
3359
3360 sprintf (filename, "/proc/%d/task/%ld/stat",
3361 ptid_get_pid (ptid), ptid_get_lwp (ptid));
3362 f = fopen (filename, "r");
3363 if (!f)
3364 return -1;
3365
3366 for (;;)
3367 {
3368 int n;
3369 content = realloc (content, content_read + 1024);
3370 n = fread (content + content_read, 1, 1024, f);
3371 content_read += n;
3372 if (n < 1024)
3373 {
3374 content[content_read] = '\0';
3375 break;
3376 }
3377 }
3378
3379 p = strchr (content, '(');
3380 p = strchr (p, ')') + 2; /* skip ")" and a whitespace. */
3381
3382 p = strtok_r (p, " ", &ts);
3383 for (i = 0; i != 36; ++i)
3384 p = strtok_r (NULL, " ", &ts);
3385
3386 if (sscanf (p, "%d", &core) == 0)
3387 core = -1;
3388
3389 free (content);
3390 fclose (f);
3391
3392 return core;
3393 }
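/* The parsing above relies on the /proc/PID/task/TID/stat layout
   described in proc(5): after the parenthesised command name the
   fields are space-separated, and skipping 36 tokens past the state
   field lands on field 39 overall - "processor", the CPU the task
   last ran on - which is what sscanf extracts as the core number. */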
3394
3395 static struct target_ops linux_target_ops = {
3396 linux_create_inferior,
3397 linux_attach,
3398 linux_kill,
3399 linux_detach,
3400 linux_join,
3401 linux_thread_alive,
3402 linux_resume,
3403 linux_wait,
3404 linux_fetch_registers,
3405 linux_store_registers,
3406 linux_read_memory,
3407 linux_write_memory,
3408 linux_look_up_symbols,
3409 linux_request_interrupt,
3410 linux_read_auxv,
3411 linux_insert_point,
3412 linux_remove_point,
3413 linux_stopped_by_watchpoint,
3414 linux_stopped_data_address,
3415 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3416 linux_read_offsets,
3417 #else
3418 NULL,
3419 #endif
3420 #ifdef USE_THREAD_DB
3421 thread_db_get_tls_address,
3422 #else
3423 NULL,
3424 #endif
3425 linux_qxfer_spu,
3426 hostio_last_error_from_errno,
3427 linux_qxfer_osdata,
3428 linux_xfer_siginfo,
3429 linux_supports_non_stop,
3430 linux_async,
3431 linux_start_non_stop,
3432 linux_supports_multi_process,
3433 #ifdef USE_THREAD_DB
3434 thread_db_handle_monitor_command,
3435 #else
3436 NULL,
3437 #endif
3438 linux_core_of_thread
3439 };
3440
3441 static void
3442 linux_init_signals ()
3443 {
3444 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
3445 to find what the cancel signal actually is. */
3446 #ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
3447 signal (__SIGRTMIN+1, SIG_IGN);
3448 #endif
3449 }
3450
3451 void
3452 initialize_low (void)
3453 {
3454 struct sigaction sigchld_action;
3455 memset (&sigchld_action, 0, sizeof (sigchld_action));
3456 set_target_ops (&linux_target_ops);
3457 set_breakpoint_data (the_low_target.breakpoint,
3458 the_low_target.breakpoint_len);
3459 linux_init_signals ();
3460 linux_test_for_tracefork ();
3461 #ifdef HAVE_LINUX_REGSETS
3462 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
3463 ;
3464 disabled_regsets = xmalloc (num_regsets);
3465 #endif
3466
3467 sigchld_action.sa_handler = sigchld_handler;
3468 sigemptyset (&sigchld_action.sa_mask);
3469 sigchld_action.sa_flags = SA_RESTART;
3470 sigaction (SIGCHLD, &sigchld_action, NULL);
3471 }