1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <sys/wait.h>
24 #include <stdio.h>
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
27 #include <signal.h>
28 #include <sys/ioctl.h>
29 #include <fcntl.h>
30 #include <string.h>
31 #include <stdlib.h>
32 #include <unistd.h>
33 #include <errno.h>
34 #include <sys/syscall.h>
35 #include <sched.h>
36 #include <ctype.h>
37 #include <pwd.h>
38 #include <sys/types.h>
39 #include <dirent.h>
40 #include <sys/stat.h>
41 #include <sys/vfs.h>
42 #include <sys/uio.h>
43 #ifndef ELFMAG0
44 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
45 then ELFMAG0 will have been defined. If it didn't get included by
46 gdb_proc_service.h then including it will likely introduce a duplicate
47 definition of elf_fpregset_t. */
48 #include <elf.h>
49 #endif
50
51 #ifndef SPUFS_MAGIC
52 #define SPUFS_MAGIC 0x23c9b64e
53 #endif
54
55 #ifndef PTRACE_GETSIGINFO
56 # define PTRACE_GETSIGINFO 0x4202
57 # define PTRACE_SETSIGINFO 0x4203
58 #endif
59
60 #ifndef O_LARGEFILE
61 #define O_LARGEFILE 0
62 #endif
63
64 /* If the system headers did not provide the constants, hard-code the normal
65 values. */
66 #ifndef PTRACE_EVENT_FORK
67
68 #define PTRACE_SETOPTIONS 0x4200
69 #define PTRACE_GETEVENTMSG 0x4201
70
71 /* options set using PTRACE_SETOPTIONS */
72 #define PTRACE_O_TRACESYSGOOD 0x00000001
73 #define PTRACE_O_TRACEFORK 0x00000002
74 #define PTRACE_O_TRACEVFORK 0x00000004
75 #define PTRACE_O_TRACECLONE 0x00000008
76 #define PTRACE_O_TRACEEXEC 0x00000010
77 #define PTRACE_O_TRACEVFORKDONE 0x00000020
78 #define PTRACE_O_TRACEEXIT 0x00000040
79
80 /* Wait extended result codes for the above trace options. */
81 #define PTRACE_EVENT_FORK 1
82 #define PTRACE_EVENT_VFORK 2
83 #define PTRACE_EVENT_CLONE 3
84 #define PTRACE_EVENT_EXEC 4
85 #define PTRACE_EVENT_VFORK_DONE 5
86 #define PTRACE_EVENT_EXIT 6
87
88 #endif /* PTRACE_EVENT_FORK */
89
90 /* We can't always assume that this flag is available, but all systems
91 with the ptrace event handlers also have __WALL, so it's safe to use
92 in some contexts. */
93 #ifndef __WALL
94 #define __WALL 0x40000000 /* Wait for any child. */
95 #endif
96
97 #ifndef W_STOPCODE
98 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
99 #endif
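
/* A minimal sketch (kept compiled out): W_STOPCODE builds the same
   status value waitpid reports for a child stopped by SIG, so the
   standard wait macros recover it.  */
#if 0
static void
w_stopcode_example (void)
{
  int status = W_STOPCODE (SIGSTOP);	/* (SIGSTOP << 8) | 0x7f */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    fprintf (stderr, "status round-trips as expected\n");
}
#endif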
100
101 #ifdef __UCLIBC__
102 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
103 #define HAS_NOMMU
104 #endif
105 #endif
106
107 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
108 representation of the thread ID.
109
110 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
111 the same as the LWP ID.
112
113 ``all_processes'' is keyed by the "overall process ID", which
114 GNU/Linux calls tgid, "thread group ID". */
115
116 struct inferior_list all_lwps;
117
118 /* A list of all unknown processes which receive stop signals. Some other
119 process will presumably claim each of these as forked children
120 momentarily. */
121
122 struct inferior_list stopped_pids;
123
124 /* FIXME this is a bit of a hack, and could be removed. */
125 int stopping_threads;
126
127 /* FIXME make into a target method? */
128 int using_threads = 1;
129
130 /* This flag is true iff we've just created or attached to our first
131 inferior but it has not stopped yet. As soon as it does, we need
132 to call the low target's arch_setup callback. Doing this only on
133 the first inferior avoids reinitializing the architecture on every
134 inferior, and avoids messing with the register caches of the
135 already running inferiors. NOTE: this assumes all inferiors under
136 control of gdbserver have the same architecture. */
137 static int new_inferior;
138
139 static void linux_resume_one_lwp (struct lwp_info *lwp,
140 int step, int signal, siginfo_t *info);
141 static void linux_resume (struct thread_resume *resume_info, size_t n);
142 static void stop_all_lwps (void);
143 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
144 static void *add_lwp (ptid_t ptid);
145 static int linux_stopped_by_watchpoint (void);
146 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
147 static int linux_core_of_thread (ptid_t ptid);
148 static void proceed_all_lwps (void);
149 static void unstop_all_lwps (struct lwp_info *except);
150 static int finish_step_over (struct lwp_info *lwp);
151 static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
152 static int kill_lwp (unsigned long lwpid, int signo);
153
154 /* True if the low target can hardware single-step. Such targets
155 don't need a BREAKPOINT_REINSERT_ADDR callback. */
156
157 static int
158 can_hardware_single_step (void)
159 {
160 return (the_low_target.breakpoint_reinsert_addr == NULL);
161 }
162
163 /* True if the low target supports memory breakpoints. If so, we'll
164 have a GET_PC implementation. */
165
166 static int
167 supports_breakpoints (void)
168 {
169 return (the_low_target.get_pc != NULL);
170 }
171
172 struct pending_signals
173 {
174 int signal;
175 siginfo_t info;
176 struct pending_signals *prev;
177 };
178
179 #define PTRACE_ARG3_TYPE void *
180 #define PTRACE_ARG4_TYPE void *
181 #define PTRACE_XFER_TYPE long
182
183 #ifdef HAVE_LINUX_REGSETS
184 static char *disabled_regsets;
185 static int num_regsets;
186 #endif
187
188 /* The read/write ends of the pipe registered as waitable file in the
189 event loop. */
190 static int linux_event_pipe[2] = { -1, -1 };
191
192 /* True if we're currently in async mode. */
193 #define target_is_async_p() (linux_event_pipe[0] != -1)
194
195 static void send_sigstop (struct inferior_list_entry *entry);
196 static void wait_for_sigstop (struct inferior_list_entry *entry);
197
198 /* Accepts an integer PID; returns a string representing a file that
199 can be opened to get info for the child process.
200 Space for the result is malloc'd; the caller must free it. */
201
202 char *
203 linux_child_pid_to_exec_file (int pid)
204 {
205 char *name1, *name2;
206
207 name1 = xmalloc (MAXPATHLEN);
208 name2 = xmalloc (MAXPATHLEN);
209 memset (name2, 0, MAXPATHLEN);
210
211 sprintf (name1, "/proc/%d/exe", pid);
212 if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
213 {
214 free (name1);
215 return name2;
216 }
217 else
218 {
219 free (name2);
220 return name1;
221 }
222 }
223
224 /* Return non-zero if HEADER is a 64-bit ELF file. */
225
226 static int
227 elf_64_header_p (const Elf64_Ehdr *header)
228 {
229 return (header->e_ident[EI_MAG0] == ELFMAG0
230 && header->e_ident[EI_MAG1] == ELFMAG1
231 && header->e_ident[EI_MAG2] == ELFMAG2
232 && header->e_ident[EI_MAG3] == ELFMAG3
233 && header->e_ident[EI_CLASS] == ELFCLASS64);
234 }
235
236 /* Return non-zero if FILE is a 64-bit ELF file,
237 zero if the file is not a 64-bit ELF file,
238 and -1 if the file is not accessible or doesn't exist. */
239
240 int
241 elf_64_file_p (const char *file)
242 {
243 Elf64_Ehdr header;
244 int fd;
245
246 fd = open (file, O_RDONLY);
247 if (fd < 0)
248 return -1;
249
250 if (read (fd, &header, sizeof (header)) != sizeof (header))
251 {
252 close (fd);
253 return 0;
254 }
255 close (fd);
256
257 return elf_64_header_p (&header);
258 }
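
/* Usage sketch (kept compiled out): decide the bitness of a live
   process from its /proc/PID/exe image, combining the two helpers
   above.  */
#if 0
static void
elf_64_example (int pid)
{
  char *file = linux_child_pid_to_exec_file (pid);
  int is64 = elf_64_file_p (file);

  if (is64 < 0)
    fprintf (stderr, "%s is not accessible\n", file);
  else
    fprintf (stderr, "%s is %s-bit ELF\n", file, is64 ? "64" : "32");
  free (file);
}
#endif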
259
260 static void
261 delete_lwp (struct lwp_info *lwp)
262 {
263 remove_thread (get_lwp_thread (lwp));
264 remove_inferior (&all_lwps, &lwp->head);
265 free (lwp->arch_private);
266 free (lwp);
267 }
268
269 /* Add a process to the common process list, and set its private
270 data. */
271
272 static struct process_info *
273 linux_add_process (int pid, int attached)
274 {
275 struct process_info *proc;
276
277 /* Is this the first process? If so, then set the arch. */
278 if (all_processes.head == NULL)
279 new_inferior = 1;
280
281 proc = add_process (pid, attached);
282 proc->private = xcalloc (1, sizeof (*proc->private));
283
284 if (the_low_target.new_process != NULL)
285 proc->private->arch_private = the_low_target.new_process ();
286
287 return proc;
288 }
289
290 /* Wrapper function for waitpid which handles EINTR, and emulates
291 __WALL for systems where that is not available. */
292
293 static int
294 my_waitpid (int pid, int *status, int flags)
295 {
296 int ret, out_errno;
297
298 if (debug_threads)
299 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
300
301 if (flags & __WALL)
302 {
303 sigset_t block_mask, org_mask, wake_mask;
304 int wnohang;
305
306 wnohang = (flags & WNOHANG) != 0;
307 flags &= ~(__WALL | __WCLONE);
308 flags |= WNOHANG;
309
310 /* Block all signals while here. This avoids knowing about
311 LinuxThreads' signals. */
312 sigfillset (&block_mask);
313 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
314
315 /* ... except during the sigsuspend below. */
316 sigemptyset (&wake_mask);
317
318 while (1)
319 {
320 /* Since all signals are blocked, there's no need to check
321 for EINTR here. */
322 ret = waitpid (pid, status, flags);
323 out_errno = errno;
324
325 if (ret == -1 && out_errno != ECHILD)
326 break;
327 else if (ret > 0)
328 break;
329
330 if (flags & __WCLONE)
331 {
332 /* We've tried both flavors now. If WNOHANG is set,
333 there's nothing else to do, just bail out. */
334 if (wnohang)
335 break;
336
337 if (debug_threads)
338 fprintf (stderr, "blocking\n");
339
340 /* Block waiting for signals. */
341 sigsuspend (&wake_mask);
342 }
343
344 flags ^= __WCLONE;
345 }
346
347 sigprocmask (SIG_SETMASK, &org_mask, NULL);
348 }
349 else
350 {
351 do
352 ret = waitpid (pid, status, flags);
353 while (ret == -1 && errno == EINTR);
354 out_errno = errno;
355 }
356
357 if (debug_threads)
358 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
359 pid, flags, status ? *status : -1, ret);
360
361 errno = out_errno;
362 return ret;
363 }
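
/* Usage sketch (kept compiled out): reap a stop from any child, clone
   or fork alike, the way callers below do; the emulation above makes
   this work even where the C library lacks __WALL support.  */
#if 0
static void
my_waitpid_example (void)
{
  int status;
  int pid = my_waitpid (-1, &status, __WALL);

  if (pid > 0 && WIFSTOPPED (status))
    fprintf (stderr, "LWP %d stopped by signal %d\n",
	     pid, WSTOPSIG (status));
}
#endif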
364
365 /* Handle a GNU/Linux extended wait response. If we see a clone
366 event, we need to add the new LWP to our list (and not report the
367 trap to higher layers). */
368
369 static void
370 handle_extended_wait (struct lwp_info *event_child, int wstat)
371 {
372 int event = wstat >> 16;
373 struct lwp_info *new_lwp;
374
375 if (event == PTRACE_EVENT_CLONE)
376 {
377 ptid_t ptid;
378 unsigned long new_pid;
379 int ret, status = W_STOPCODE (SIGSTOP);
380
381 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
382
383 /* If we haven't already seen the new PID stop, wait for it now. */
384 if (! pull_pid_from_list (&stopped_pids, new_pid))
385 {
386 /* The new child has a pending SIGSTOP. We can't affect it until it
387 hits the SIGSTOP, but we're already attached. */
388
389 ret = my_waitpid (new_pid, &status, __WALL);
390
391 if (ret == -1)
392 perror_with_name ("waiting for new child");
393 else if (ret != new_pid)
394 warning ("wait returned unexpected PID %d", ret);
395 else if (!WIFSTOPPED (status))
396 warning ("wait returned unexpected status 0x%x", status);
397 }
398
399 ptrace (PTRACE_SETOPTIONS, new_pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
400
401 ptid = ptid_build (pid_of (event_child), new_pid, 0);
402 new_lwp = (struct lwp_info *) add_lwp (ptid);
403 add_thread (ptid, new_lwp);
404
405 /* Either we're going to immediately resume the new thread
406 or leave it stopped. linux_resume_one_lwp is a nop if it
407 thinks the thread is currently running, so set this first
408 before calling linux_resume_one_lwp. */
409 new_lwp->stopped = 1;
410
411 /* Normally we will get the pending SIGSTOP. But in some cases
412 we might get another signal delivered to the group first.
413 If we do get another signal, be sure not to lose it. */
414 if (WSTOPSIG (status) == SIGSTOP)
415 {
416 if (stopping_threads)
417 new_lwp->stop_pc = get_stop_pc (new_lwp);
418 else
419 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
420 }
421 else
422 {
423 new_lwp->stop_expected = 1;
424
425 if (stopping_threads)
426 {
427 new_lwp->stop_pc = get_stop_pc (new_lwp);
428 new_lwp->status_pending_p = 1;
429 new_lwp->status_pending = status;
430 }
431 else
432 /* Pass the signal on. This is what GDB does - except
433 shouldn't we really report it instead? */
434 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
435 }
436
437 /* Always resume the current thread. If we are stopping
438 threads, it will have a pending SIGSTOP; we may as well
439 collect it now. */
440 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
441 }
442 }
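
/* Decoding sketch (kept compiled out): with PTRACE_O_TRACECLONE set,
   a clone event arrives as a SIGTRAP stop with the event code in the
   upper half of the wait status, which is what handle_extended_wait
   extracts with WSTAT >> 16.  */
#if 0
static int
is_clone_event (int wstat)
{
  return (WIFSTOPPED (wstat)
	  && WSTOPSIG (wstat) == SIGTRAP
	  && (wstat >> 16) == PTRACE_EVENT_CLONE);
}
#endif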
443
444 /* Return the PC as read from the regcache of LWP, without any
445 adjustment. */
446
447 static CORE_ADDR
448 get_pc (struct lwp_info *lwp)
449 {
450 struct thread_info *saved_inferior;
451 struct regcache *regcache;
452 CORE_ADDR pc;
453
454 if (the_low_target.get_pc == NULL)
455 return 0;
456
457 saved_inferior = current_inferior;
458 current_inferior = get_lwp_thread (lwp);
459
460 regcache = get_thread_regcache (current_inferior, 1);
461 pc = (*the_low_target.get_pc) (regcache);
462
463 if (debug_threads)
464 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
465
466 current_inferior = saved_inferior;
467 return pc;
468 }
469
470 /* This function should only be called if LWP got a SIGTRAP.
471 The SIGTRAP could mean several things.
472
473 On i386, where decr_pc_after_break is non-zero:
474 If we were single-stepping this process using PTRACE_SINGLESTEP,
475 we will get only the one SIGTRAP (even if the instruction we
476 stepped over was a breakpoint). The value of $eip will be the
477 next instruction.
478 If we continue the process using PTRACE_CONT, we will get a
479 SIGTRAP when we hit a breakpoint. The value of $eip will be
480 the instruction after the breakpoint (i.e. needs to be
481 decremented). If we report the SIGTRAP to GDB, we must also
482 report the undecremented PC. If we cancel the SIGTRAP, we
483 must resume at the decremented PC.
484
485 (Presumably, not yet tested) On a non-decr_pc_after_break machine
486 with hardware or kernel single-step:
487 If we single-step over a breakpoint instruction, our PC will
488 point at the following instruction. If we continue and hit a
489 breakpoint instruction, our PC will point at the breakpoint
490 instruction. */
491
492 static CORE_ADDR
493 get_stop_pc (struct lwp_info *lwp)
494 {
495 CORE_ADDR stop_pc;
496
497 if (the_low_target.get_pc == NULL)
498 return 0;
499
500 stop_pc = get_pc (lwp);
501
502 if (WSTOPSIG (lwp->last_status) == SIGTRAP
503 && !lwp->stepping
504 && !lwp->stopped_by_watchpoint
505 && lwp->last_status >> 16 == 0)
506 stop_pc -= the_low_target.decr_pc_after_break;
507
508 if (debug_threads)
509 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
510
511 return stop_pc;
512 }
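
/* Worked example (hypothetical addresses): on i386 the breakpoint
   instruction is the one-byte int3 and decr_pc_after_break is 1.  If
   a breakpoint sits at 0x8048500, the SIGTRAP leaves the PC at
   0x8048501 and get_stop_pc reports 0x8048500.  Single-step and
   watchpoint SIGTRAPs are deliberately left unadjusted by the checks
   above.  */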
513
514 static void *
515 add_lwp (ptid_t ptid)
516 {
517 struct lwp_info *lwp;
518
519 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
520 memset (lwp, 0, sizeof (*lwp));
521
522 lwp->head.id = ptid;
523
524 if (the_low_target.new_thread != NULL)
525 lwp->arch_private = the_low_target.new_thread ();
526
527 add_inferior_to_list (&all_lwps, &lwp->head);
528
529 return lwp;
530 }
531
532 /* Start an inferior process and return its pid.
533 ALLARGS is a vector of program-name and args. */
534
535 static int
536 linux_create_inferior (char *program, char **allargs)
537 {
538 struct lwp_info *new_lwp;
539 int pid;
540 ptid_t ptid;
541
542 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
543 pid = vfork ();
544 #else
545 pid = fork ();
546 #endif
547 if (pid < 0)
548 perror_with_name ("fork");
549
550 if (pid == 0)
551 {
552 ptrace (PTRACE_TRACEME, 0, 0, 0);
553
554 #ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
555 signal (__SIGRTMIN + 1, SIG_DFL);
556 #endif
557
558 setpgid (0, 0);
559
560 execv (program, allargs);
561 if (errno == ENOENT)
562 execvp (program, allargs);
563
564 fprintf (stderr, "Cannot exec %s: %s.\n", program,
565 strerror (errno));
566 fflush (stderr);
567 _exit (0177);
568 }
569
570 linux_add_process (pid, 0);
571
572 ptid = ptid_build (pid, pid, 0);
573 new_lwp = add_lwp (ptid);
574 add_thread (ptid, new_lwp);
575 new_lwp->must_set_ptrace_flags = 1;
576
577 return pid;
578 }
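
/* Handshake sketch (kept compiled out): the code above relies on the
   kernel stopping a PTRACE_TRACEME child at its exec, so the parent's
   first wait collects a SIGTRAP before any user code runs.  */
#if 0
static void
traceme_example (void)
{
  int pid = fork ();

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      execlp ("true", "true", (char *) NULL);
      _exit (127);
    }
  else if (pid > 0)
    {
      int status;

      waitpid (pid, &status, 0);	/* SIGTRAP stop at exec.  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }
}
#endif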
579
580 /* Attach to an inferior process. */
581
582 static void
583 linux_attach_lwp_1 (unsigned long lwpid, int initial)
584 {
585 ptid_t ptid;
586 struct lwp_info *new_lwp;
587
588 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
589 {
590 if (!initial)
591 {
592 /* If we fail to attach to an LWP, just warn. */
593 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
594 strerror (errno), errno);
595 fflush (stderr);
596 return;
597 }
598 else
599 /* If we fail to attach to a process, report an error. */
600 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
601 strerror (errno), errno);
602 }
603
604 if (initial)
605 /* NOTE/FIXME: This lwp might not have been the tgid. */
606 ptid = ptid_build (lwpid, lwpid, 0);
607 else
608 {
609 /* Note that extracting the pid from the current inferior is
610 safe, since we're always called in the context of the same
611 process as this new thread. */
612 int pid = pid_of (get_thread_lwp (current_inferior));
613 ptid = ptid_build (pid, lwpid, 0);
614 }
615
616 new_lwp = (struct lwp_info *) add_lwp (ptid);
617 add_thread (ptid, new_lwp);
618
619 /* We need to wait for SIGSTOP before being able to make the next
620 ptrace call on this LWP. */
621 new_lwp->must_set_ptrace_flags = 1;
622
623 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
624 brings it to a halt.
625
626 There are several cases to consider here:
627
628 1) gdbserver has already attached to the process and is being notified
629 of a new thread that is being created.
630 In this case we should ignore that SIGSTOP and resume the
631 process. This is handled below by setting stop_expected = 1,
632 and the fact that add_thread sets last_resume_kind ==
633 resume_continue.
634
635 2) This is the first thread (the process thread), and we're attaching
636 to it via attach_inferior.
637 In this case we want the process thread to stop.
638 This is handled by having linux_attach set last_resume_kind ==
639 resume_stop after we return.
640 ??? If the process already has several threads we leave the other
641 threads running.
642
643 3) GDB is connecting to gdbserver and is requesting an enumeration of all
644 existing threads.
645 In this case we want the thread to stop.
646 FIXME: This case is currently not properly handled.
647 We should wait for the SIGSTOP but don't. Things work apparently
648 because enough time passes between when we ptrace (ATTACH) and when
649 gdb makes the next ptrace call on the thread.
650
651 On the other hand, if we are currently trying to stop all threads, we
652 should treat the new thread as if we had sent it a SIGSTOP. This works
653 because we are guaranteed that the add_lwp call above added us to the
654 end of the list, and so the new thread has not yet reached
655 wait_for_sigstop (but will). */
656 new_lwp->stop_expected = 1;
657 }
658
659 void
660 linux_attach_lwp (unsigned long lwpid)
661 {
662 linux_attach_lwp_1 (lwpid, 0);
663 }
664
665 int
666 linux_attach (unsigned long pid)
667 {
668 linux_attach_lwp_1 (pid, 1);
669 linux_add_process (pid, 1);
670
671 if (!non_stop)
672 {
673 struct thread_info *thread;
674
675 /* Don't ignore the initial SIGSTOP if we just attached to this
676 process. It will be collected by wait shortly. */
677 thread = find_thread_ptid (ptid_build (pid, pid, 0));
678 thread->last_resume_kind = resume_stop;
679 }
680
681 return 0;
682 }
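
/* Attach sketch (kept compiled out): PTRACE_ATTACH makes the kernel
   queue a SIGSTOP for the target; until that stop is collected with
   wait, most other ptrace requests on the LWP fail.  That is why the
   code above records stop_expected / last_resume_kind instead of
   touching the LWP right away.  */
#if 0
static void
raw_attach_example (int pid)
{
  int status;

  if (ptrace (PTRACE_ATTACH, pid, 0, 0) != 0)
    return;
  waitpid (pid, &status, __WALL);	/* Collect the SIGSTOP.  */
}
#endif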
683
684 struct counter
685 {
686 int pid;
687 int count;
688 };
689
690 static int
691 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
692 {
693 struct counter *counter = args;
694
695 if (ptid_get_pid (entry->id) == counter->pid)
696 {
697 if (++counter->count > 1)
698 return 1;
699 }
700
701 return 0;
702 }
703
704 static int
705 last_thread_of_process_p (struct thread_info *thread)
706 {
707 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
708 int pid = ptid_get_pid (ptid);
709 struct counter counter = { pid , 0 };
710
711 return (find_inferior (&all_threads,
712 second_thread_of_pid_p, &counter) == NULL);
713 }
714
715 /* Kill the inferior lwp. */
716
717 static int
718 linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
719 {
720 struct thread_info *thread = (struct thread_info *) entry;
721 struct lwp_info *lwp = get_thread_lwp (thread);
722 int wstat;
723 int pid = * (int *) args;
724
725 if (ptid_get_pid (entry->id) != pid)
726 return 0;
727
728 /* We avoid killing the first thread here, because of a Linux kernel (at
729 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
730 the children get a chance to be reaped, it will remain a zombie
731 forever. */
732
733 if (lwpid_of (lwp) == pid)
734 {
735 if (debug_threads)
736 fprintf (stderr, "lkop: is last of process %s\n",
737 target_pid_to_str (entry->id));
738 return 0;
739 }
740
741 /* If we're killing a running inferior, make sure it is stopped
742 first, as PTRACE_KILL will not work otherwise. */
743 if (!lwp->stopped)
744 send_sigstop (&lwp->head);
745
746 do
747 {
748 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
749
750 /* Make sure it died. The loop is most likely unnecessary. */
751 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
752 } while (pid > 0 && WIFSTOPPED (wstat));
753
754 return 0;
755 }
756
757 static int
758 linux_kill (int pid)
759 {
760 struct process_info *process;
761 struct lwp_info *lwp;
762 struct thread_info *thread;
763 int wstat;
764 int lwpid;
765
766 process = find_process_pid (pid);
767 if (process == NULL)
768 return -1;
769
770 find_inferior (&all_threads, linux_kill_one_lwp, &pid);
771
772 /* See the comment in linux_kill_one_lwp. We did not kill the first
773 thread in the list, so do so now. */
774 lwp = find_lwp_pid (pid_to_ptid (pid));
775 thread = get_lwp_thread (lwp);
776
777 if (debug_threads)
778 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
779 lwpid_of (lwp), pid);
780
781 /* If we're killing a running inferior, make sure it is stopped
782 first, as PTRACE_KILL will not work otherwise. */
783 if (!lwp->stopped)
784 send_sigstop (&lwp->head);
785
786 do
787 {
788 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
789
790 /* Make sure it died. The loop is most likely unnecessary. */
791 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
792 } while (lwpid > 0 && WIFSTOPPED (wstat));
793
794 delete_lwp (lwp);
795
796 the_target->mourn (process);
797 return 0;
798 }
799
800 static int
801 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
802 {
803 struct thread_info *thread = (struct thread_info *) entry;
804 struct lwp_info *lwp = get_thread_lwp (thread);
805 int pid = * (int *) args;
806
807 if (ptid_get_pid (entry->id) != pid)
808 return 0;
809
810 /* If we're detaching from a running inferior, make sure it is
811 stopped first, as PTRACE_DETACH will not work otherwise. */
812 if (!lwp->stopped)
813 {
814 int lwpid = lwpid_of (lwp);
815
816 stopping_threads = 1;
817 send_sigstop (&lwp->head);
818
819 /* If this detects a new thread through a clone event, the new
820 thread is appended to the end of the lwp list, so we'll
821 eventually detach from it. */
822 wait_for_sigstop (&lwp->head);
823 stopping_threads = 0;
824
825 /* If LWP exits while we're trying to stop it, there's nothing
826 left to do. */
827 lwp = find_lwp_pid (pid_to_ptid (lwpid));
828 if (lwp == NULL)
829 return 0;
830 }
831
832 /* If this process is stopped but is expecting a SIGSTOP, then make
833 sure we take care of that now. This isn't absolutely guaranteed
834 to collect the SIGSTOP, but is fairly likely to. */
835 if (lwp->stop_expected)
836 {
837 int wstat;
838 /* Clear stop_expected, so that the SIGSTOP will be reported. */
839 lwp->stop_expected = 0;
840 if (lwp->stopped)
841 linux_resume_one_lwp (lwp, 0, 0, NULL);
842 linux_wait_for_event (lwp->head.id, &wstat, __WALL);
843 }
844
845 /* Flush any pending changes to the process's registers. */
846 regcache_invalidate_one ((struct inferior_list_entry *)
847 get_lwp_thread (lwp));
848
849 /* Finally, let it resume. */
850 ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);
851
852 delete_lwp (lwp);
853 return 0;
854 }
855
856 static int
857 any_thread_of (struct inferior_list_entry *entry, void *args)
858 {
859 int *pid_p = args;
860
861 if (ptid_get_pid (entry->id) == *pid_p)
862 return 1;
863
864 return 0;
865 }
866
867 static int
868 linux_detach (int pid)
869 {
870 struct process_info *process;
871
872 process = find_process_pid (pid);
873 if (process == NULL)
874 return -1;
875
876 #ifdef USE_THREAD_DB
877 thread_db_detach (process);
878 #endif
879
880 current_inferior =
881 (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);
882
883 delete_all_breakpoints ();
884 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
885
886 the_target->mourn (process);
887 return 0;
888 }
889
890 static void
891 linux_mourn (struct process_info *process)
892 {
893 struct process_info_private *priv;
894
895 #ifdef USE_THREAD_DB
896 thread_db_mourn (process);
897 #endif
898
899 /* Free all private data. */
900 priv = process->private;
901 free (priv->arch_private);
902 free (priv);
903 process->private = NULL;
904 }
905
906 static void
907 linux_join (int pid)
908 {
909 int status, ret;
910 struct process_info *process;
911
912 process = find_process_pid (pid);
913 if (process == NULL)
914 return;
915
916 do {
917 ret = my_waitpid (pid, &status, 0);
918 if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
919 break;
920 } while (ret != -1 || errno != ECHILD);
921 }
922
923 /* Return nonzero if the given thread is still alive. */
924 static int
925 linux_thread_alive (ptid_t ptid)
926 {
927 struct lwp_info *lwp = find_lwp_pid (ptid);
928
929 /* We assume we always know if a thread exits. If a whole process
930 exited but we still haven't been able to report it to GDB, we'll
931 hold on to the last lwp of the dead process. */
932 if (lwp != NULL)
933 return !lwp->dead;
934 else
935 return 0;
936 }
937
938 /* Return 1 if this lwp has an interesting status pending. */
939 static int
940 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
941 {
942 struct lwp_info *lwp = (struct lwp_info *) entry;
943 ptid_t ptid = * (ptid_t *) arg;
944 struct thread_info *thread = get_lwp_thread (lwp);
945
946 /* Check if we're only interested in events from a specific process
947 or its lwps. */
948 if (!ptid_equal (minus_one_ptid, ptid)
949 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
950 return 0;
951
952 thread = get_lwp_thread (lwp);
953
954 /* If we got a `vCont;t', but we haven't reported a stop yet, do
955 report any status pending the LWP may have. */
956 if (thread->last_resume_kind == resume_stop
957 && thread->last_status.kind == TARGET_WAITKIND_STOPPED)
958 return 0;
959
960 return lwp->status_pending_p;
961 }
962
963 static int
964 same_lwp (struct inferior_list_entry *entry, void *data)
965 {
966 ptid_t ptid = *(ptid_t *) data;
967 int lwp;
968
969 if (ptid_get_lwp (ptid) != 0)
970 lwp = ptid_get_lwp (ptid);
971 else
972 lwp = ptid_get_pid (ptid);
973
974 if (ptid_get_lwp (entry->id) == lwp)
975 return 1;
976
977 return 0;
978 }
979
980 struct lwp_info *
981 find_lwp_pid (ptid_t ptid)
982 {
983 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
984 }
985
986 static struct lwp_info *
987 linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
988 {
989 int ret;
990 int to_wait_for = -1;
991 struct lwp_info *child = NULL;
992
993 if (debug_threads)
994 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
995
996 if (ptid_equal (ptid, minus_one_ptid))
997 to_wait_for = -1; /* any child */
998 else
999 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
1000
1001 options |= __WALL;
1002
1003 retry:
1004
1005 ret = my_waitpid (to_wait_for, wstatp, options);
1006 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
1007 return NULL;
1008 else if (ret == -1)
1009 perror_with_name ("waitpid");
1010
1011 if (debug_threads
1012 && (!WIFSTOPPED (*wstatp)
1013 || (WSTOPSIG (*wstatp) != 32
1014 && WSTOPSIG (*wstatp) != 33)))	/* Thread library internal signals.  */
1015 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
1016
1017 child = find_lwp_pid (pid_to_ptid (ret));
1018
1019 /* If we didn't find a process, one of two things presumably happened:
1020 - A process we started and then detached from has exited. Ignore it.
1021 - A process we are controlling has forked and the new child's stop
1022 was reported to us by the kernel. Save its PID. */
1023 if (child == NULL && WIFSTOPPED (*wstatp))
1024 {
1025 add_pid_to_list (&stopped_pids, ret);
1026 goto retry;
1027 }
1028 else if (child == NULL)
1029 goto retry;
1030
1031 child->stopped = 1;
1032
1033 child->last_status = *wstatp;
1034
1035 /* Architecture-specific setup after inferior is running.
1036 This needs to happen after we have attached to the inferior
1037 and it is stopped for the first time, but before we access
1038 any inferior registers. */
1039 if (new_inferior)
1040 {
1041 the_low_target.arch_setup ();
1042 #ifdef HAVE_LINUX_REGSETS
1043 memset (disabled_regsets, 0, num_regsets);
1044 #endif
1045 new_inferior = 0;
1046 }
1047
1048 /* Fetch the possibly triggered data watchpoint info and store it in
1049 CHILD.
1050
1051 On some archs, like x86, that use debug registers to set
1052 watchpoints, it's possible that the way to know which watched
1053 address trapped, is to check the register that is used to select
1054 which address to watch. Problem is, between setting the
1055 watchpoint and reading back which data address trapped, the user
1056 may change the set of watchpoints, and, as a consequence, GDB
1057 changes the debug registers in the inferior. To avoid reading
1058 back a stale stopped-data-address when that happens, we cache in
1059 LP the fact that a watchpoint trapped, and the corresponding data
1060 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1061 changes the debug registers meanwhile, we have the cached data we
1062 can rely on. */
1063
1064 if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
1065 {
1066 if (the_low_target.stopped_by_watchpoint == NULL)
1067 {
1068 child->stopped_by_watchpoint = 0;
1069 }
1070 else
1071 {
1072 struct thread_info *saved_inferior;
1073
1074 saved_inferior = current_inferior;
1075 current_inferior = get_lwp_thread (child);
1076
1077 child->stopped_by_watchpoint
1078 = the_low_target.stopped_by_watchpoint ();
1079
1080 if (child->stopped_by_watchpoint)
1081 {
1082 if (the_low_target.stopped_data_address != NULL)
1083 child->stopped_data_address
1084 = the_low_target.stopped_data_address ();
1085 else
1086 child->stopped_data_address = 0;
1087 }
1088
1089 current_inferior = saved_inferior;
1090 }
1091 }
1092
1093 /* Store the STOP_PC, with adjustment applied. This depends on the
1094 architecture being defined already (so that CHILD has a valid
1095 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1096 not). */
1097 if (WIFSTOPPED (*wstatp))
1098 child->stop_pc = get_stop_pc (child);
1099
1100 if (debug_threads
1101 && WIFSTOPPED (*wstatp)
1102 && the_low_target.get_pc != NULL)
1103 {
1104 struct thread_info *saved_inferior = current_inferior;
1105 struct regcache *regcache;
1106 CORE_ADDR pc;
1107
1108 current_inferior = get_lwp_thread (child);
1109 regcache = get_thread_regcache (current_inferior, 1);
1110 pc = (*the_low_target.get_pc) (regcache);
1111 fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
1112 current_inferior = saved_inferior;
1113 }
1114
1115 return child;
1116 }
1117
1118 /* This function should only be called if the LWP got a SIGTRAP.
1119
1120 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1121 event was handled, 0 otherwise. */
1122
1123 static int
1124 handle_tracepoints (struct lwp_info *lwp)
1125 {
1126 struct thread_info *tinfo = get_lwp_thread (lwp);
1127 int tpoint_related_event = 0;
1128
1129 /* And we need to be sure that any all-threads-stopping doesn't try
1130 to move threads out of the jump pads, as it could deadlock the
1131 inferior (LWP could be in the jump pad, maybe even holding the
1132 lock). */
1133
1134 /* Do any necessary step collect actions. */
1135 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1136
1137 /* See if we just hit a tracepoint and do its main collect
1138 actions. */
1139 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1140
1141 if (tpoint_related_event)
1142 {
1143 if (debug_threads)
1144 fprintf (stderr, "got a tracepoint event\n");
1145 return 1;
1146 }
1147
1148 return 0;
1149 }
1150
1151 /* Arrange for a breakpoint to be hit again later. We don't keep the
1152 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1153 will handle the current event, eventually we will resume this LWP,
1154 and this breakpoint will trap again. */
1155
1156 static int
1157 cancel_breakpoint (struct lwp_info *lwp)
1158 {
1159 struct thread_info *saved_inferior;
1160
1161 /* There's nothing to do if we don't support breakpoints. */
1162 if (!supports_breakpoints ())
1163 return 0;
1164
1165 /* breakpoint_at reads from current inferior. */
1166 saved_inferior = current_inferior;
1167 current_inferior = get_lwp_thread (lwp);
1168
1169 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1170 {
1171 if (debug_threads)
1172 fprintf (stderr,
1173 "CB: Push back breakpoint for %s\n",
1174 target_pid_to_str (ptid_of (lwp)));
1175
1176 /* Back up the PC if necessary. */
1177 if (the_low_target.decr_pc_after_break)
1178 {
1179 struct regcache *regcache
1180 = get_thread_regcache (current_inferior, 1);
1181 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1182 }
1183
1184 current_inferior = saved_inferior;
1185 return 1;
1186 }
1187 else
1188 {
1189 if (debug_threads)
1190 fprintf (stderr,
1191 "CB: No breakpoint found at %s for [%s]\n",
1192 paddress (lwp->stop_pc),
1193 target_pid_to_str (ptid_of (lwp)));
1194 }
1195
1196 current_inferior = saved_inferior;
1197 return 0;
1198 }
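
/* E.g. (hypothetical addresses): if an LWP trapped on an int3 at
   0x8048500, cancel_breakpoint rewinds the PC from 0x8048501 back to
   lwp->stop_pc, and the discarded SIGTRAP simply re-fires the next
   time the LWP is resumed past that address.  */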
1199
1200 /* When the event-loop is doing a step-over, this points at the thread
1201 being stepped. */
1202 ptid_t step_over_bkpt;
1203
1204 /* Wait for an event from child PTID; minus_one_ptid means any child.
1205 Store the stop status through the status pointer WSTAT.
1206 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1207 event was found and OPTIONS contains WNOHANG. Return the PID of
1208 the stopped child otherwise. */
1209
1210 static int
1211 linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
1212 {
1213 struct lwp_info *event_child, *requested_child;
1214
1215 event_child = NULL;
1216 requested_child = NULL;
1217
1218 /* Check for a lwp with a pending status. */
1219
1220 if (ptid_equal (ptid, minus_one_ptid)
1221 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
1222 {
1223 event_child = (struct lwp_info *)
1224 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1225 if (debug_threads && event_child)
1226 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1227 }
1228 else
1229 {
1230 requested_child = find_lwp_pid (ptid);
1231
1232 if (requested_child->status_pending_p)
1233 event_child = requested_child;
1234 }
1235
1236 if (event_child != NULL)
1237 {
1238 if (debug_threads)
1239 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1240 lwpid_of (event_child), event_child->status_pending);
1241 *wstat = event_child->status_pending;
1242 event_child->status_pending_p = 0;
1243 event_child->status_pending = 0;
1244 current_inferior = get_lwp_thread (event_child);
1245 return lwpid_of (event_child);
1246 }
1247
1248 /* We only enter this loop if no process has a pending wait status. Thus
1249 any action taken in response to a wait status inside this loop is
1250 responding as soon as we detect the status, not after any pending
1251 events. */
1252 while (1)
1253 {
1254 event_child = linux_wait_for_lwp (ptid, wstat, options);
1255
1256 if ((options & WNOHANG) && event_child == NULL)
1257 {
1258 if (debug_threads)
1259 fprintf (stderr, "WNOHANG set, no event found\n");
1260 return 0;
1261 }
1262
1263 if (event_child == NULL)
1264 error ("event from unknown child");
1265
1266 current_inferior = get_lwp_thread (event_child);
1267
1268 /* Check for thread exit. */
1269 if (! WIFSTOPPED (*wstat))
1270 {
1271 if (debug_threads)
1272 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1273
1274 /* If the last thread is exiting, just return. */
1275 if (last_thread_of_process_p (current_inferior))
1276 {
1277 if (debug_threads)
1278 fprintf (stderr, "LWP %ld is last lwp of process\n",
1279 lwpid_of (event_child));
1280 return lwpid_of (event_child);
1281 }
1282
1283 if (!non_stop)
1284 {
1285 current_inferior = (struct thread_info *) all_threads.head;
1286 if (debug_threads)
1287 fprintf (stderr, "Current inferior is now %ld\n",
1288 lwpid_of (get_thread_lwp (current_inferior)));
1289 }
1290 else
1291 {
1292 current_inferior = NULL;
1293 if (debug_threads)
1294 fprintf (stderr, "Current inferior is now <NULL>\n");
1295 }
1296
1297 /* If we were waiting for this particular child to do something...
1298 well, it did something. */
1299 if (requested_child != NULL)
1300 {
1301 int lwpid = lwpid_of (event_child);
1302
1303 /* Cancel the step-over operation --- the thread that
1304 started it is gone. */
1305 if (finish_step_over (event_child))
1306 unstop_all_lwps (event_child);
1307 delete_lwp (event_child);
1308 return lwpid;
1309 }
1310
1311 delete_lwp (event_child);
1312
1313 /* Wait for a more interesting event. */
1314 continue;
1315 }
1316
1317 if (event_child->must_set_ptrace_flags)
1318 {
1319 ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
1320 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
1321 event_child->must_set_ptrace_flags = 0;
1322 }
1323
1324 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1325 && *wstat >> 16 != 0)
1326 {
1327 handle_extended_wait (event_child, *wstat);
1328 continue;
1329 }
1330
1331 /* If GDB is not interested in this signal, don't stop other
1332 threads, and don't report it to GDB. Just resume the
1333 inferior right away. We do this for threading-related
1334 signals as well as any that GDB specifically requested we
1335 ignore. But never ignore SIGSTOP if we sent it ourselves,
1336 and do not ignore signals when stepping - they may require
1337 special handling to skip the signal handler. */
1338 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
1339 thread library? */
1340 if (WIFSTOPPED (*wstat)
1341 && !event_child->stepping
1342 && (
1343 #if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
1344 (current_process ()->private->thread_db != NULL
1345 && (WSTOPSIG (*wstat) == __SIGRTMIN
1346 || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
1347 ||
1348 #endif
1349 (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
1350 && !(WSTOPSIG (*wstat) == SIGSTOP
1351 && event_child->stop_expected))))
1352 {
1353 siginfo_t info, *info_p;
1354
1355 if (debug_threads)
1356 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
1357 WSTOPSIG (*wstat), lwpid_of (event_child));
1358
1359 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
1360 info_p = &info;
1361 else
1362 info_p = NULL;
1363 linux_resume_one_lwp (event_child, event_child->stepping,
1364 WSTOPSIG (*wstat), info_p);
1365 continue;
1366 }
1367
1368 if (WIFSTOPPED (*wstat)
1369 && WSTOPSIG (*wstat) == SIGSTOP
1370 && event_child->stop_expected)
1371 {
1372 int should_stop;
1373
1374 if (debug_threads)
1375 fprintf (stderr, "Expected stop.\n");
1376 event_child->stop_expected = 0;
1377
1378 should_stop = (current_inferior->last_resume_kind == resume_stop
1379 || stopping_threads);
1380
1381 if (!should_stop)
1382 {
1383 linux_resume_one_lwp (event_child,
1384 event_child->stepping, 0, NULL);
1385 continue;
1386 }
1387 }
1388
1389 return lwpid_of (event_child);
1390 }
1391
1392 /* NOTREACHED */
1393 return 0;
1394 }
1395
1396 static int
1397 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1398 {
1399 ptid_t wait_ptid;
1400
1401 if (ptid_is_pid (ptid))
1402 {
1403 /* A request to wait for a specific tgid. This is not possible
1404 with waitpid, so instead, we wait for any child, and leave
1405 children we're not interested in right now with a pending
1406 status to report later. */
1407 wait_ptid = minus_one_ptid;
1408 }
1409 else
1410 wait_ptid = ptid;
1411
1412 while (1)
1413 {
1414 int event_pid;
1415
1416 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1417
1418 if (event_pid > 0
1419 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1420 {
1421 struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1422
1423 if (! WIFSTOPPED (*wstat))
1424 mark_lwp_dead (event_child, *wstat);
1425 else
1426 {
1427 event_child->status_pending_p = 1;
1428 event_child->status_pending = *wstat;
1429 }
1430 }
1431 else
1432 return event_pid;
1433 }
1434 }
1435
1436
1437 /* Count the LWPs that have had events. */
1438
1439 static int
1440 count_events_callback (struct inferior_list_entry *entry, void *data)
1441 {
1442 struct lwp_info *lp = (struct lwp_info *) entry;
1443 struct thread_info *thread = get_lwp_thread (lp);
1444 int *count = data;
1445
1446 gdb_assert (count != NULL);
1447
1448 /* Count only resumed LWPs that have a SIGTRAP event pending that
1449 should be reported to GDB. */
1450 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1451 && thread->last_resume_kind != resume_stop
1452 && lp->status_pending_p
1453 && WIFSTOPPED (lp->status_pending)
1454 && WSTOPSIG (lp->status_pending) == SIGTRAP
1455 && !breakpoint_inserted_here (lp->stop_pc))
1456 (*count)++;
1457
1458 return 0;
1459 }
1460
1461 /* Select the LWP (if any) that is currently being single-stepped. */
1462
1463 static int
1464 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1465 {
1466 struct lwp_info *lp = (struct lwp_info *) entry;
1467 struct thread_info *thread = get_lwp_thread (lp);
1468
1469 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1470 && thread->last_resume_kind == resume_step
1471 && lp->status_pending_p)
1472 return 1;
1473 else
1474 return 0;
1475 }
1476
1477 /* Select the Nth LWP that has had a SIGTRAP event that should be
1478 reported to GDB. */
1479
1480 static int
1481 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1482 {
1483 struct lwp_info *lp = (struct lwp_info *) entry;
1484 struct thread_info *thread = get_lwp_thread (lp);
1485 int *selector = data;
1486
1487 gdb_assert (selector != NULL);
1488
1489 /* Select only resumed LWPs that have a SIGTRAP event pending. */
1490 if (thread->last_resume_kind != resume_stop
1491 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1492 && lp->status_pending_p
1493 && WIFSTOPPED (lp->status_pending)
1494 && WSTOPSIG (lp->status_pending) == SIGTRAP
1495 && !breakpoint_inserted_here (lp->stop_pc))
1496 if ((*selector)-- == 0)
1497 return 1;
1498
1499 return 0;
1500 }
1501
1502 static int
1503 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1504 {
1505 struct lwp_info *lp = (struct lwp_info *) entry;
1506 struct thread_info *thread = get_lwp_thread (lp);
1507 struct lwp_info *event_lp = data;
1508
1509 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1510 if (lp == event_lp)
1511 return 0;
1512
1513 /* If an LWP other than the LWP that we're reporting an event for has
1514 hit a GDB breakpoint (as opposed to some random trap signal),
1515 then just arrange for it to hit it again later. We don't keep
1516 the SIGTRAP status and don't forward the SIGTRAP signal to the
1517 LWP. We will handle the current event, eventually we will resume
1518 all LWPs, and this one will get its breakpoint trap again.
1519
1520 If we do not do this, then we run the risk that the user will
1521 delete or disable the breakpoint, but the LWP will have already
1522 tripped on it. */
1523
1524 if (thread->last_resume_kind != resume_stop
1525 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1526 && lp->status_pending_p
1527 && WIFSTOPPED (lp->status_pending)
1528 && WSTOPSIG (lp->status_pending) == SIGTRAP
1529 && !lp->stepping
1530 && !lp->stopped_by_watchpoint
1531 && cancel_breakpoint (lp))
1532 /* Throw away the SIGTRAP. */
1533 lp->status_pending_p = 0;
1534
1535 return 0;
1536 }
1537
1538 /* Select one LWP out of those that have events pending. */
1539
1540 static void
1541 select_event_lwp (struct lwp_info **orig_lp)
1542 {
1543 int num_events = 0;
1544 int random_selector;
1545 struct lwp_info *event_lp;
1546
1547 /* Give preference to any LWP that is being single-stepped. */
1548 event_lp
1549 = (struct lwp_info *) find_inferior (&all_lwps,
1550 select_singlestep_lwp_callback, NULL);
1551 if (event_lp != NULL)
1552 {
1553 if (debug_threads)
1554 fprintf (stderr,
1555 "SEL: Select single-step %s\n",
1556 target_pid_to_str (ptid_of (event_lp)));
1557 }
1558 else
1559 {
1560 /* No single-stepping LWP. Select one at random, out of those
1561 which have had SIGTRAP events. */
1562
1563 /* First see how many SIGTRAP events we have. */
1564 find_inferior (&all_lwps, count_events_callback, &num_events);
1565
1566 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1567 random_selector = (int)
1568 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1569
1570 if (debug_threads && num_events > 1)
1571 fprintf (stderr,
1572 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1573 num_events, random_selector);
1574
1575 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1576 select_event_lwp_callback,
1577 &random_selector);
1578 }
1579
1580 if (event_lp != NULL)
1581 {
1582 /* Switch the event LWP. */
1583 *orig_lp = event_lp;
1584 }
1585 }
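
/* Note on the selector above: (num_events * (double) rand ())
   / (RAND_MAX + 1.0) maps rand ()'s full range uniformly onto
   0 .. num_events - 1, avoiding the slight bias of
   rand () % num_events.  With three pending SIGTRAPs, each LWP is
   chosen with probability ~1/3.  */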
1586
1587 /* Set this inferior LWP's state as "want-stopped". We won't resume
1588 this LWP until the client gives us another action for it. */
1589
1590 static void
1591 gdb_wants_lwp_stopped (struct inferior_list_entry *entry)
1592 {
1593 struct lwp_info *lwp = (struct lwp_info *) entry;
1594 struct thread_info *thread = get_lwp_thread (lwp);
1595
1596 /* Most threads are stopped implicitly (all-stop); tag that with
1597 signal 0. The thread being explicitly reported stopped to the
1598 client gets its status fixed up afterwards. */
1599 thread->last_status.kind = TARGET_WAITKIND_STOPPED;
1600 thread->last_status.value.sig = TARGET_SIGNAL_0;
1601
1602 thread->last_resume_kind = resume_stop;
1603 }
1604
1605 /* Set the state of every LWP to "want-stopped". */
1606
1607 static void
1608 gdb_wants_all_stopped (void)
1609 {
1610 for_each_inferior (&all_lwps, gdb_wants_lwp_stopped);
1611 }
1612
1613 /* Wait for the process; return its wait status. */
1614
1615 static ptid_t
1616 linux_wait_1 (ptid_t ptid,
1617 struct target_waitstatus *ourstatus, int target_options)
1618 {
1619 int w;
1620 struct lwp_info *event_child;
1621 int options;
1622 int pid;
1623 int step_over_finished;
1624 int bp_explains_trap;
1625 int maybe_internal_trap;
1626 int report_to_gdb;
1627 int trace_event;
1628
1629 /* Translate generic target options into Linux options. */
1630 options = __WALL;
1631 if (target_options & TARGET_WNOHANG)
1632 options |= WNOHANG;
1633
1634 retry:
1635 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1636
1637 /* If we were only supposed to resume one thread, only wait for
1638 that thread - if it's still alive. If it died, however - which
1639 can happen if we're coming from the thread death case below -
1640 then we need to make sure we restart the other threads. We could
1641 pick a thread at random or restart all; restarting all is less
1642 arbitrary. */
1643 if (!non_stop
1644 && !ptid_equal (cont_thread, null_ptid)
1645 && !ptid_equal (cont_thread, minus_one_ptid))
1646 {
1647 struct thread_info *thread;
1648
1649 thread = (struct thread_info *) find_inferior_id (&all_threads,
1650 cont_thread);
1651
1652 /* No stepping, no signal - unless one is pending already, of course. */
1653 if (thread == NULL)
1654 {
1655 struct thread_resume resume_info;
1656 resume_info.thread = minus_one_ptid;
1657 resume_info.kind = resume_continue;
1658 resume_info.sig = 0;
1659 linux_resume (&resume_info, 1);
1660 }
1661 else
1662 ptid = cont_thread;
1663 }
1664
1665 if (ptid_equal (step_over_bkpt, null_ptid))
1666 pid = linux_wait_for_event (ptid, &w, options);
1667 else
1668 {
1669 if (debug_threads)
1670 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
1671 target_pid_to_str (step_over_bkpt));
1672 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
1673 }
1674
1675 if (pid == 0) /* only if TARGET_WNOHANG */
1676 return null_ptid;
1677
1678 event_child = get_thread_lwp (current_inferior);
1679
1680 /* If we are waiting for a particular child, and it exited,
1681 linux_wait_for_event will return its exit status. Similarly if
1682 the last child exited. If this is not the last child, however,
1683 do not report it as exited until there is a 'thread exited' response
1684 available in the remote protocol. Instead, just wait for another event.
1685 This should be safe, because if the thread crashed we will already
1686 have reported the termination signal to GDB; that should stop any
1687 in-progress stepping operations, etc.
1688
1689 Report the exit status of the last thread to exit. This matches
1690 LinuxThreads' behavior. */
1691
1692 if (last_thread_of_process_p (current_inferior))
1693 {
1694 if (WIFEXITED (w) || WIFSIGNALED (w))
1695 {
1696 delete_lwp (event_child);
1697
1698 current_inferior = NULL;
1699
1700 if (WIFEXITED (w))
1701 {
1702 ourstatus->kind = TARGET_WAITKIND_EXITED;
1703 ourstatus->value.integer = WEXITSTATUS (w);
1704
1705 if (debug_threads)
1706 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
1707 }
1708 else
1709 {
1710 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
1711 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
1712
1713 if (debug_threads)
1714 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
1715
1716 }
1717
1718 return pid_to_ptid (pid);
1719 }
1720 }
1721 else
1722 {
1723 if (!WIFSTOPPED (w))
1724 goto retry;
1725 }
1726
1727 /* If this event was not handled before, and is not a SIGTRAP, we
1728 report it. SIGILL and SIGSEGV are also treated as traps in case
1729 a breakpoint is inserted at the current PC. If this target does
1730 not support internal breakpoints at all, we also report the
1731 SIGTRAP without further processing; it's of no concern to us. */
1732 maybe_internal_trap
1733 = (supports_breakpoints ()
1734 && (WSTOPSIG (w) == SIGTRAP
1735 || ((WSTOPSIG (w) == SIGILL
1736 || WSTOPSIG (w) == SIGSEGV)
1737 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
1738
1739 if (maybe_internal_trap)
1740 {
1741 /* Handle anything that requires bookkeeping before deciding to
1742 report the event or continue waiting. */
1743
1744 /* First check if we can explain the SIGTRAP with an internal
1745 breakpoint, or if we should possibly report the event to GDB.
1746 Do this before anything that may remove or insert a
1747 breakpoint. */
1748 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
1749
1750 /* We have a SIGTRAP, possibly a step-over dance has just
1751 finished. If so, tweak the state machine accordingly,
1752 reinsert breakpoints and delete any reinsert (software
1753 single-step) breakpoints. */
1754 step_over_finished = finish_step_over (event_child);
1755
1756 /* Now invoke the callbacks of any internal breakpoints there. */
1757 check_breakpoints (event_child->stop_pc);
1758
1759 /* Handle tracepoint data collecting. This may overflow the
1760 trace buffer, and cause a tracing stop, removing
1761 breakpoints. */
1762 trace_event = handle_tracepoints (event_child);
1763
1764 if (bp_explains_trap)
1765 {
1766 /* If we stepped or ran into an internal breakpoint, we've
1767 already handled it. So next time we resume (from this
1768 PC), we should step over it. */
1769 if (debug_threads)
1770 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
1771
1772 if (breakpoint_here (event_child->stop_pc))
1773 event_child->need_step_over = 1;
1774 }
1775 }
1776 else
1777 {
1778 /* We have some other signal, possibly a step-over dance was in
1779 progress, and it should be cancelled too. */
1780 step_over_finished = finish_step_over (event_child);
1781
1782 trace_event = 0;
1783 }
1784
1785 /* We have all the data we need. Either report the event to GDB, or
1786 resume threads and keep waiting for more. */
1787
1788 /* Check if GDB would be interested in this event. If GDB wanted
1789 this thread to single step, we always want to report the SIGTRAP,
1790 and let GDB handle it. Watchpoints should always be reported.
1791 So should signals we can't explain. A SIGTRAP we can't explain
1792 could be a GDB breakpoint --- we may or may not support Z0
1793 breakpoints. If we do, we'll be able to handle GDB breakpoints
1794 on top of internal breakpoints, by handling the internal
1795 breakpoint and still reporting the event to GDB. If we don't,
1796 we're out of luck; GDB won't see the breakpoint hit. */
1797 report_to_gdb = (!maybe_internal_trap
1798 || current_inferior->last_resume_kind == resume_step
1799 || event_child->stopped_by_watchpoint
1800 || (!step_over_finished && !bp_explains_trap && !trace_event)
1801 || gdb_breakpoint_here (event_child->stop_pc));
1802
1803 /* We found no reason GDB would want us to stop. We either hit one
1804 of our own breakpoints, or finished an internal step GDB
1805 shouldn't know about. */
1806 if (!report_to_gdb)
1807 {
1808 if (debug_threads)
1809 {
1810 if (bp_explains_trap)
1811 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
1812 if (step_over_finished)
1813 fprintf (stderr, "Step-over finished.\n");
1814 if (trace_event)
1815 fprintf (stderr, "Tracepoint event.\n");
1816 }
1817
1818 /* We're not reporting this breakpoint to GDB, so apply the
1819 decr_pc_after_break adjustment to the inferior's regcache
1820 ourselves. */
1821
1822 if (the_low_target.set_pc != NULL)
1823 {
1824 struct regcache *regcache
1825 = get_thread_regcache (get_lwp_thread (event_child), 1);
1826 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
1827 }
1828
1829 /* We've finished stepping over a breakpoint. We've stopped all
1830 LWPs momentarily except the stepping one. This is where we
1831 resume them all again. We're going to keep waiting, so use
1832 proceed, which handles stepping over the next breakpoint. */
1833 if (debug_threads)
1834 fprintf (stderr, "proceeding all threads.\n");
1835 proceed_all_lwps ();
1836 goto retry;
1837 }
1838
1839 if (debug_threads)
1840 {
1841 if (current_inferior->last_resume_kind == resume_step)
1842 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
1843 if (event_child->stopped_by_watchpoint)
1844 fprintf (stderr, "Stopped by watchpoint.\n");
1845 if (gdb_breakpoint_here (event_child->stop_pc))
1846 fprintf (stderr, "Stopped by GDB breakpoint.\n");
1847 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
1849 }
1850
1851 /* Alright, we're going to report a stop. */
1852
1853 if (!non_stop)
1854 {
1855 /* In all-stop, stop all threads. */
1856 stop_all_lwps ();
1857
1858 /* If we're not waiting for a specific LWP, choose an event LWP
1859 from among those that have had events. Giving equal priority
1860 to all LWPs that have had events helps prevent
1861 starvation. */
1862 if (ptid_equal (ptid, minus_one_ptid))
1863 {
1864 event_child->status_pending_p = 1;
1865 event_child->status_pending = w;
1866
1867 select_event_lwp (&event_child);
1868
1869 event_child->status_pending_p = 0;
1870 w = event_child->status_pending;
1871 }
1872
1873 /* Now that we've selected our final event LWP, cancel any
1874 breakpoints in other LWPs that have hit a GDB breakpoint.
1875 See the comment in cancel_breakpoints_callback to find out
1876 why. */
1877 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
1878 }
1879 else
1880 {
1881 /* If we just finished a step-over, then all threads had been
1882 momentarily paused. In all-stop, that's fine, we want
1883 threads stopped by now anyway. In non-stop, we need to
1884 re-resume threads that GDB wanted to be running. */
1885 if (step_over_finished)
1886 unstop_all_lwps (event_child);
1887 }
1888
1889 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1890
1891 /* Do this before the gdb_wants_* calls below, since they
1892 always set last_resume_kind to resume_stop. */
1893 if (current_inferior->last_resume_kind == resume_stop
1894 && WSTOPSIG (w) == SIGSTOP)
1895 {
1896 /* GDB requested this thread to stop with vCont;t, and it
1897 stopped cleanly, so report the stop as SIG0. The use of
1898 SIGSTOP is an implementation detail. */
1899 ourstatus->value.sig = TARGET_SIGNAL_0;
1900 }
1901 else if (current_inferior->last_resume_kind == resume_stop
1902 && WSTOPSIG (w) != SIGSTOP)
1903 {
1904 /* GDB requested this thread to stop with vCont;t, but it
1905 stopped for some other reason; report that signal. */
1906 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1907 }
1908 else
1909 {
1910 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1911 }
1912
1913 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
1914
1915 if (!non_stop)
1916 {
1917 /* From GDB's perspective, all-stop mode always stops all
1918 threads implicitly. Tag all threads as "want-stopped". */
1919 gdb_wants_all_stopped ();
1920 }
1921 else
1922 {
1923 /* We're reporting this LWP as stopped. Update its
1924 "want-stopped" state to what the client wants, until it gets
1925 a new resume action. */
1926 gdb_wants_lwp_stopped (&event_child->head);
1927 }
1928
1929 if (debug_threads)
1930 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
1931 target_pid_to_str (ptid_of (event_child)),
1932 ourstatus->kind,
1933 ourstatus->value.sig);
1934
1935 get_lwp_thread (event_child)->last_status = *ourstatus;
1936 return ptid_of (event_child);
1937 }
1938
1939 /* Get rid of any pending event in the pipe. */
1940 static void
1941 async_file_flush (void)
1942 {
1943 int ret;
1944 char buf;
1945
1946 do
1947 ret = read (linux_event_pipe[0], &buf, 1);
1948 while (ret >= 0 || (ret == -1 && errno == EINTR));
1949 }
1950
1951 /* Put something in the pipe, so the event loop wakes up. */
1952 static void
1953 async_file_mark (void)
1954 {
1955 int ret;
1956
1957 async_file_flush ();
1958
1959 do
1960 ret = write (linux_event_pipe[1], "+", 1);
1961 while (ret == 0 || (ret == -1 && errno == EINTR));
1962
1963 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1964 be awakened anyway. */
1965 }
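/* For illustration only --- a minimal, standalone sketch of the
   self-pipe trick that async_file_flush and async_file_mark rely on.
   The real linux_event_pipe is created elsewhere when async mode is
   enabled; the names below are hypothetical.  */
#if 0
static int demo_event_pipe[2];

static void
demo_event_pipe_init (void)
{
  pipe (demo_event_pipe);

  /* Both ends are non-blocking, so that flushing can drain the pipe
     without hanging when it's empty, and marking never blocks the
     caller when it's full.  */
  fcntl (demo_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (demo_event_pipe[1], F_SETFL, O_NONBLOCK);
}
#endif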
1966
1967 static ptid_t
1968 linux_wait (ptid_t ptid,
1969 struct target_waitstatus *ourstatus, int target_options)
1970 {
1971 ptid_t event_ptid;
1972
1973 if (debug_threads)
1974 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
1975
1976 /* Flush the async file first. */
1977 if (target_is_async_p ())
1978 async_file_flush ();
1979
1980 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
1981
1982 /* If at least one stop was reported, there may be more. A single
1983 SIGCHLD can signal more than one child stop. */
1984 if (target_is_async_p ()
1985 && (target_options & TARGET_WNOHANG) != 0
1986 && !ptid_equal (event_ptid, null_ptid))
1987 async_file_mark ();
1988
1989 return event_ptid;
1990 }
1991
1992 /* Send a signal to an LWP. */
1993
1994 static int
1995 kill_lwp (unsigned long lwpid, int signo)
1996 {
1997 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1998 fails, then we are not using nptl threads and we should be using kill. */
1999
2000 #ifdef __NR_tkill
2001 {
2002 static int tkill_failed;
2003
2004 if (!tkill_failed)
2005 {
2006 int ret;
2007
2008 errno = 0;
2009 ret = syscall (__NR_tkill, lwpid, signo);
2010 if (errno != ENOSYS)
2011 return ret;
2012 tkill_failed = 1;
2013 }
2014 }
2015 #endif
2016
2017 return kill (lwpid, signo);
2018 }
2019
2020 static void
2021 send_sigstop (struct inferior_list_entry *entry)
2022 {
2023 struct lwp_info *lwp = (struct lwp_info *) entry;
2024 int pid;
2025
2026 if (lwp->stopped)
2027 return;
2028
2029 pid = lwpid_of (lwp);
2030
2031 /* If we already have a pending stop signal for this process, don't
2032 send another. */
2033 if (lwp->stop_expected)
2034 {
2035 if (debug_threads)
2036 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2037
2038 return;
2039 }
2040
2041 if (debug_threads)
2042 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2043
2044 lwp->stop_expected = 1;
2045 kill_lwp (pid, SIGSTOP);
2046 }
2047
2048 static void
2049 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2050 {
2051 /* It's dead, really. */
2052 lwp->dead = 1;
2053
2054 /* Store the exit status for later. */
2055 lwp->status_pending_p = 1;
2056 lwp->status_pending = wstat;
2057
2058 /* Prevent trying to stop it. */
2059 lwp->stopped = 1;
2060
2061 /* No further stops are expected from a dead lwp. */
2062 lwp->stop_expected = 0;
2063 }
2064
2065 static void
2066 wait_for_sigstop (struct inferior_list_entry *entry)
2067 {
2068 struct lwp_info *lwp = (struct lwp_info *) entry;
2069 struct thread_info *saved_inferior;
2070 int wstat;
2071 ptid_t saved_tid;
2072 ptid_t ptid;
2073 int pid;
2074
2075 if (lwp->stopped)
2076 {
2077 if (debug_threads)
2078 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2079 lwpid_of (lwp));
2080 return;
2081 }
2082
2083 saved_inferior = current_inferior;
2084 if (saved_inferior != NULL)
2085 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2086 else
2087 saved_tid = null_ptid; /* avoid bogus unused warning */
2088
2089 ptid = lwp->head.id;
2090
2091 if (debug_threads)
2092 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2093
2094 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2095
2096 /* If we stopped with a non-SIGSTOP signal, save it for later
2097 and record the pending SIGSTOP. If the process exited, just
2098 return. */
2099 if (WIFSTOPPED (wstat))
2100 {
2101 if (debug_threads)
2102 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2103 lwpid_of (lwp), WSTOPSIG (wstat));
2104
2105 if (WSTOPSIG (wstat) != SIGSTOP)
2106 {
2107 if (debug_threads)
2108 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2109 lwpid_of (lwp), wstat);
2110
2111 lwp->status_pending_p = 1;
2112 lwp->status_pending = wstat;
2113 }
2114 }
2115 else
2116 {
2117 if (debug_threads)
2118 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2119
2120 lwp = find_lwp_pid (pid_to_ptid (pid));
2121 if (lwp)
2122 {
2123 /* Leave this status pending for the next time we're able to
2124 report it. In the meantime, we'll report this lwp as
2125 dead to GDB, so GDB doesn't try to read registers and
2126 memory from it. This can only happen if this was the
2127 last thread of the process; otherwise, PID is removed
2128 from the thread tables before linux_wait_for_event
2129 returns. */
2130 mark_lwp_dead (lwp, wstat);
2131 }
2132 }
2133
2134 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2135 current_inferior = saved_inferior;
2136 else
2137 {
2138 if (debug_threads)
2139 fprintf (stderr, "Previously current thread died.\n");
2140
2141 if (non_stop)
2142 {
2143 /* We can't change the current inferior behind GDB's back,
2144 otherwise, a subsequent command may apply to the wrong
2145 process. */
2146 current_inferior = NULL;
2147 }
2148 else
2149 {
2150 /* Set a valid thread as current. */
2151 set_desired_inferior (0);
2152 }
2153 }
2154 }
2155
2156 static void
2157 stop_all_lwps (void)
2158 {
2159 stopping_threads = 1;
2160 for_each_inferior (&all_lwps, send_sigstop);
2161 for_each_inferior (&all_lwps, wait_for_sigstop);
2162 stopping_threads = 0;
2163 }
2164
2165 /* Resume execution of the inferior process.
2166 If STEP is nonzero, single-step it.
2167 If SIGNAL is nonzero, give it that signal. */
2168
2169 static void
2170 linux_resume_one_lwp (struct lwp_info *lwp,
2171 int step, int signal, siginfo_t *info)
2172 {
2173 struct thread_info *saved_inferior;
2174
2175 if (lwp->stopped == 0)
2176 return;
2177
2178 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2179 user used the "jump" command, or "set $pc = foo"). */
2180 if (lwp->stop_pc != get_pc (lwp))
2181 {
2182 /* Collecting 'while-stepping' actions doesn't make sense
2183 anymore. */
2184 release_while_stepping_state_list (get_lwp_thread (lwp));
2185 }
2186
2187 /* If we have pending signals or status, and a new signal, enqueue the
2188 signal. Also enqueue the signal if we are waiting to reinsert a
2189 breakpoint; it will be picked up again below. */
2190 if (signal != 0
2191 && (lwp->status_pending_p || lwp->pending_signals != NULL
2192 || lwp->bp_reinsert != 0))
2193 {
2194 struct pending_signals *p_sig;
2195 p_sig = xmalloc (sizeof (*p_sig));
2196 p_sig->prev = lwp->pending_signals;
2197 p_sig->signal = signal;
2198 if (info == NULL)
2199 memset (&p_sig->info, 0, sizeof (siginfo_t));
2200 else
2201 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2202 lwp->pending_signals = p_sig;
2203 }
2204
2205 if (lwp->status_pending_p)
2206 {
2207 if (debug_threads)
2208 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2209 " has pending status\n",
2210 lwpid_of (lwp), step ? "step" : "continue", signal,
2211 lwp->stop_expected ? "expected" : "not expected");
2212 return;
2213 }
2214
2215 saved_inferior = current_inferior;
2216 current_inferior = get_lwp_thread (lwp);
2217
2218 if (debug_threads)
2219 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2220 lwpid_of (lwp), step ? "step" : "continue", signal,
2221 lwp->stop_expected ? "expected" : "not expected");
2222
2223 /* This bit needs some thinking about. If we get a signal that
2224 we must report while a single-step reinsert is still pending,
2225 we often end up resuming the thread. It might be better to
2226 (ew) allow a stack of pending events; then we could be sure that
2227 the reinsert happened right away and not lose any signals.
2228
2229 Making this stack would also shrink the window in which breakpoints are
2230 uninserted (see comment in linux_wait_for_lwp) but not enough for
2231 complete correctness, so it won't solve that problem. It may be
2232 worthwhile just to solve this one, however. */
2233 if (lwp->bp_reinsert != 0)
2234 {
2235 if (debug_threads)
2236 fprintf (stderr, " pending reinsert at 0x%s\n",
2237 paddress (lwp->bp_reinsert));
2238
2239 if (can_hardware_single_step ())
2240 {
2241 if (step == 0)
2242 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2243
2244 step = 1;
2245 }
2246
2247 /* Postpone any pending signal. It was enqueued above. */
2248 signal = 0;
2249 }
2250
2251 /* If this thread has while-stepping actions, set it stepping.
2252 If we have a signal to deliver, its handler may or may not be
2253 set to SIG_IGN; we don't know. Assume it isn't, and allow collecting
2254 while-stepping into a signal handler. A possible smart thing to
2255 do would be to set an internal breakpoint at the signal return
2256 address, continue, and carry on catching this while-stepping
2257 action only when that breakpoint is hit. A future
2258 enhancement. */
2259 if (get_lwp_thread (lwp)->while_stepping != NULL
2260 && can_hardware_single_step ())
2261 {
2262 if (debug_threads)
2263 fprintf (stderr,
2264 "lwp %ld has a while-stepping action -> forcing step.\n",
2265 lwpid_of (lwp));
2266 step = 1;
2267 }
2268
2269 if (debug_threads && the_low_target.get_pc != NULL)
2270 {
2271 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2272 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
2273 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
2274 }
2275
2276 /* If we have pending signals, consume one unless we are trying to reinsert
2277 a breakpoint. */
2278 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
2279 {
2280 struct pending_signals **p_sig;
2281
2282 p_sig = &lwp->pending_signals;
2283 while ((*p_sig)->prev != NULL)
2284 p_sig = &(*p_sig)->prev;
2285
2286 signal = (*p_sig)->signal;
2287 if ((*p_sig)->info.si_signo != 0)
2288 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
2289
2290 free (*p_sig);
2291 *p_sig = NULL;
2292 }
2293
2294 if (the_low_target.prepare_to_resume != NULL)
2295 the_low_target.prepare_to_resume (lwp);
2296
2297 regcache_invalidate_one ((struct inferior_list_entry *)
2298 get_lwp_thread (lwp));
2299 errno = 0;
2300 lwp->stopped = 0;
2301 lwp->stopped_by_watchpoint = 0;
2302 lwp->stepping = step;
2303 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
2304 /* Coerce to a uintptr_t first to avoid potential gcc warning
2305 of coercing an 8 byte integer to a 4 byte pointer. */
2306 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
2307
2308 current_inferior = saved_inferior;
2309 if (errno)
2310 {
2311 /* ESRCH from ptrace either means that the thread was already
2312 running (an error) or that it is gone (a race condition). If
2313 it's gone, we will get a notification the next time we wait,
2314 so we can ignore the error. We could differentiate these
2315 two, but it's tricky without waiting; the thread still exists
2316 as a zombie, so sending it signal 0 would succeed. So just
2317 ignore ESRCH. */
2318 if (errno == ESRCH)
2319 return;
2320
2321 perror_with_name ("ptrace");
2322 }
2323 }
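/* A note on the pending-signal list discipline used above: signals
   are pushed at the head (p_sig->prev points at the old head), but
   consumed from the tail (the walk to the entry whose ->prev is
   NULL), so delivery order is FIFO.  For example, enqueueing SIGUSR1
   and then SIGUSR2 yields the list SIGUSR2 -> SIGUSR1 -> NULL, and
   SIGUSR1 is the one delivered on the next resume.  */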
2324
2325 struct thread_resume_array
2326 {
2327 struct thread_resume *resume;
2328 size_t n;
2329 };
2330
2331 /* This function is called once per thread. We look up the thread
2332 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2333 resume request.
2334
2335 This algorithm is O(threads * resume elements), but resume elements
2336 is small (and will remain small at least until GDB supports thread
2337 suspension). */
2338 static int
2339 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
2340 {
2341 struct lwp_info *lwp;
2342 struct thread_info *thread;
2343 int ndx;
2344 struct thread_resume_array *r;
2345
2346 thread = (struct thread_info *) entry;
2347 lwp = get_thread_lwp (thread);
2348 r = arg;
2349
2350 for (ndx = 0; ndx < r->n; ndx++)
2351 {
2352 ptid_t ptid = r->resume[ndx].thread;
2353 if (ptid_equal (ptid, minus_one_ptid)
2354 || ptid_equal (ptid, entry->id)
2355 || (ptid_is_pid (ptid)
2356 && (ptid_get_pid (ptid) == pid_of (lwp)))
2357 || (ptid_get_lwp (ptid) == -1
2358 && (ptid_get_pid (ptid) == pid_of (lwp))))
2359 {
2360 if (r->resume[ndx].kind == resume_stop
2361 && thread->last_resume_kind == resume_stop)
2362 {
2363 if (debug_threads)
2364 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
2365 thread->last_status.kind == TARGET_WAITKIND_STOPPED
2366 ? "stopped"
2367 : "stopping",
2368 lwpid_of (lwp));
2369
2370 continue;
2371 }
2372
2373 lwp->resume = &r->resume[ndx];
2374 thread->last_resume_kind = lwp->resume->kind;
2375 return 0;
2376 }
2377 }
2378
2379 /* No resume action for this thread. */
2380 lwp->resume = NULL;
2381
2382 return 0;
2383 }
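/* Example of the matching in linux_set_resume_request, with
   hypothetical ptids: a vCont action for ptid (42, 0, 0) --- pid 42,
   meaning "all threads of process 42" --- matches LWPs (42, 42, 0)
   and (42, 43, 0) through the ptid_is_pid case, while the wildcard
   minus_one_ptid matches every LWP of every process.  */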
2384
2385
2386 /* Set *FLAG_P if this lwp has an interesting status pending. */
2387 static int
2388 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
2389 {
2390 struct lwp_info *lwp = (struct lwp_info *) entry;
2391
2392 /* LWPs which will not be resumed are not interesting, because
2393 we might not wait for them next time through linux_wait. */
2394 if (lwp->resume == NULL)
2395 return 0;
2396
2397 if (lwp->status_pending_p)
2398 * (int *) flag_p = 1;
2399
2400 return 0;
2401 }
2402
2403 /* Return 1 if this lwp, which GDB wants running, is stopped at an
2404 internal breakpoint that we need to step over. This assumes that any
2405 required STOP_PC adjustment has already been propagated to the
2406 inferior's regcache. */
2407
2408 static int
2409 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
2410 {
2411 struct lwp_info *lwp = (struct lwp_info *) entry;
2412 struct thread_info *thread;
2413 struct thread_info *saved_inferior;
2414 CORE_ADDR pc;
2415
2416 /* LWPs which will not be resumed are not interesting, because we
2417 might not wait for them next time through linux_wait. */
2418
2419 if (!lwp->stopped)
2420 {
2421 if (debug_threads)
2422 fprintf (stderr,
2423 "Need step over [LWP %ld]? Ignoring, not stopped\n",
2424 lwpid_of (lwp));
2425 return 0;
2426 }
2427
2428 thread = get_lwp_thread (lwp);
2429
2430 if (thread->last_resume_kind == resume_stop)
2431 {
2432 if (debug_threads)
2433 fprintf (stderr,
2434 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
2435 lwpid_of (lwp));
2436 return 0;
2437 }
2438
2439 if (!lwp->need_step_over)
2440 {
2441 if (debug_threads)
2442 fprintf (stderr,
2443 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
return 0;
2444 }
2445
2446 if (lwp->status_pending_p)
2447 {
2448 if (debug_threads)
2449 fprintf (stderr,
2450 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
2451 lwpid_of (lwp));
2452 return 0;
2453 }
2454
2455 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
2456 or we have. */
2457 pc = get_pc (lwp);
2458
2459 /* If the PC has changed since we stopped, then don't do anything,
2460 and let the breakpoint/tracepoint be hit. This happens if, for
2461 instance, GDB handled the decr_pc_after_break subtraction itself,
2462 GDB is OOL (out-of-line) stepping this thread, or the user has
2463 issued a "jump" command or poked the thread's registers directly. */
2464 if (pc != lwp->stop_pc)
2465 {
2466 if (debug_threads)
2467 fprintf (stderr,
2468 "Need step over [LWP %ld]? Cancelling, PC was changed. "
2469 "Old stop_pc was 0x%s, PC is now 0x%s\n",
2470 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
2471
2472 lwp->need_step_over = 0;
2473 return 0;
2474 }
2475
2476 saved_inferior = current_inferior;
2477 current_inferior = thread;
2478
2479 /* We can only step over breakpoints we know about. */
2480 if (breakpoint_here (pc))
2481 {
2482 /* Don't step over a breakpoint that GDB expects to hit
2483 though. */
2484 if (gdb_breakpoint_here (pc))
2485 {
2486 if (debug_threads)
2487 fprintf (stderr,
2488 "Need step over [LWP %ld]? yes, but found"
2489 " GDB breakpoint at 0x%s; skipping step over\n",
2490 lwpid_of (lwp), paddress (pc));
2491
2492 current_inferior = saved_inferior;
2493 return 0;
2494 }
2495 else
2496 {
2497 if (debug_threads)
2498 fprintf (stderr,
2499 "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
2500 lwpid_of (lwp), paddress (pc));
2501
2502 /* We've found an lwp that needs stepping over --- return 1 so
2503 that find_inferior stops looking. */
2504 current_inferior = saved_inferior;
2505
2506 /* If the step over is cancelled, this is set again. */
2507 lwp->need_step_over = 0;
2508 return 1;
2509 }
2510 }
2511
2512 current_inferior = saved_inferior;
2513
2514 if (debug_threads)
2515 fprintf (stderr,
2516 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
2517 lwpid_of (lwp), paddress (pc));
2518
2519 return 0;
2520 }
2521
2522 /* Start a step-over operation on LWP. When LWP stops at a
2523 breakpoint, to make progress, we need to move the breakpoint out
2524 of the way. If we let other threads run while we do that, they may
2525 pass by the breakpoint location and miss hitting it. To avoid
2526 that, a step-over momentarily stops all threads while LWP is
2527 single-stepped while the breakpoint is temporarily uninserted from
2528 the inferior. When the single-step finishes, we reinsert the
2529 breakpoint, and let all threads that are supposed to be running,
2530 run again.
2531
2532 On targets that don't support hardware single-step, we don't
2533 currently support full software single-stepping. Instead, we only
2534 support stepping over the thread event breakpoint, by asking the
2535 low target where to place a reinsert breakpoint. Since this
2536 routine assumes the breakpoint being stepped over is a thread event
2537 breakpoint, it usually assumes the return address of the current
2538 function is a good enough place to set the reinsert breakpoint. */
2539
2540 static int
2541 start_step_over (struct lwp_info *lwp)
2542 {
2543 struct thread_info *saved_inferior;
2544 CORE_ADDR pc;
2545 int step;
2546
2547 if (debug_threads)
2548 fprintf (stderr,
2549 "Starting step-over on LWP %ld. Stopping all threads\n",
2550 lwpid_of (lwp));
2551
2552 stop_all_lwps ();
2553
2554 if (debug_threads)
2555 fprintf (stderr, "Done stopping all threads for step-over.\n");
2556
2557 /* Note, we should always reach here with an already adjusted PC,
2558 either by GDB (if we're resuming due to GDB's request), or by our
2559 caller, if we just finished handling an internal breakpoint GDB
2560 shouldn't care about. */
2561 pc = get_pc (lwp);
2562
2563 saved_inferior = current_inferior;
2564 current_inferior = get_lwp_thread (lwp);
2565
2566 lwp->bp_reinsert = pc;
2567 uninsert_breakpoints_at (pc);
2568
2569 if (can_hardware_single_step ())
2570 {
2571 step = 1;
2572 }
2573 else
2574 {
2575 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
2576 set_reinsert_breakpoint (raddr);
2577 step = 0;
2578 }
2579
2580 current_inferior = saved_inferior;
2581
2582 linux_resume_one_lwp (lwp, step, 0, NULL);
2583
2584 /* Require next event from this LWP. */
2585 step_over_bkpt = lwp->head.id;
2586 return 1;
2587 }
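/* For illustration, a caller's-eye view of a complete step-over
   using the routines in this file; this is a sketch, not real
   control flow (the wait actually happens back in linux_wait_1).  */
#if 0
  if (start_step_over (lwp))
    {
      /* ... wait until LWP reports its single-step SIGTRAP ... */
      if (finish_step_over (lwp))	/* Reinserts the breakpoint.  */
	proceed_all_lwps ();		/* Sets the others running again.  */
    }
#endif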
2588
2589 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
2590 start_step_over, if still there, and delete any reinsert
2591 breakpoints we've set, on non hardware single-step targets. */
2592
2593 static int
2594 finish_step_over (struct lwp_info *lwp)
2595 {
2596 if (lwp->bp_reinsert != 0)
2597 {
2598 if (debug_threads)
2599 fprintf (stderr, "Finished step over.\n");
2600
2601 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
2602 may be no breakpoint to reinsert there by now. */
2603 reinsert_breakpoints_at (lwp->bp_reinsert);
2604
2605 lwp->bp_reinsert = 0;
2606
2607 /* Delete any software-single-step reinsert breakpoints. No
2608 longer needed. We don't have to worry about other threads
2609 hitting this trap, and later not being able to explain it,
2610 because we were stepping over a breakpoint, and we hold all
2611 threads but LWP stopped while doing that. */
2612 if (!can_hardware_single_step ())
2613 delete_reinsert_breakpoints ();
2614
2615 step_over_bkpt = null_ptid;
2616 return 1;
2617 }
2618 else
2619 return 0;
2620 }
2621
2622 /* This function is called once per thread. We check the thread's resume
2623 request, which will tell us whether to resume, step, or leave the thread
2624 stopped; and what signal, if any, it should be sent.
2625
2626 For threads which we aren't explicitly told otherwise, we preserve
2627 the stepping flag; this is used for stepping over gdbserver-placed
2628 breakpoints.
2629
2630 If pending_flags was set in any thread, we queue any needed
2631 signals, since we won't actually resume. We already have a pending
2632 event to report, so we don't need to preserve any step requests;
2633 they should be re-issued if necessary. */
2634
2635 static int
2636 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
2637 {
2638 struct lwp_info *lwp;
2639 struct thread_info *thread;
2640 int step;
2641 int leave_all_stopped = * (int *) arg;
2642 int leave_pending;
2643
2644 thread = (struct thread_info *) entry;
2645 lwp = get_thread_lwp (thread);
2646
2647 if (lwp->resume == NULL)
2648 return 0;
2649
2650 if (lwp->resume->kind == resume_stop)
2651 {
2652 if (debug_threads)
2653 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
2654
2655 if (!lwp->stopped)
2656 {
2657 if (debug_threads)
2658 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
2659
2660 /* Stop the thread, and wait for the event asynchronously,
2661 through the event loop. */
2662 send_sigstop (&lwp->head);
2663 }
2664 else
2665 {
2666 if (debug_threads)
2667 fprintf (stderr, "already stopped LWP %ld\n",
2668 lwpid_of (lwp));
2669
2670 /* The LWP may have been stopped in an internal event that
2671 was not meant to be notified back to GDB (e.g., gdbserver
2672 breakpoint), so we should be reporting a stop event in
2673 this case too. */
2674
2675 /* If the thread already has a pending SIGSTOP, this is a
2676 no-op. Otherwise, something later will presumably resume
2677 the thread and this will cause it to cancel any pending
2678 operation, due to last_resume_kind == resume_stop. If
2679 the thread already has a pending status to report, we
2680 will still report it the next time we wait - see
2681 status_pending_p_callback. */
2682 send_sigstop (&lwp->head);
2683 }
2684
2685 /* For stop requests, we're done. */
2686 lwp->resume = NULL;
2687 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
2688 return 0;
2689 }
2690
2691 /* If this thread which is about to be resumed has a pending status,
2692 then don't resume any threads - we can just report the pending
2693 status. Make sure to queue any signals that would otherwise be
2694 sent. In all-stop mode, we base this decision on whether *any*
2695 thread has a pending status. If there's a thread that needs the
2696 step-over-breakpoint dance, then don't resume any other thread
2697 but that particular one. */
2698 leave_pending = (lwp->status_pending_p || leave_all_stopped);
2699
2700 if (!leave_pending)
2701 {
2702 if (debug_threads)
2703 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
2704
2705 step = (lwp->resume->kind == resume_step);
2706 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
2707 }
2708 else
2709 {
2710 if (debug_threads)
2711 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
2712
2713 /* If we have a new signal, enqueue the signal. */
2714 if (lwp->resume->sig != 0)
2715 {
2716 struct pending_signals *p_sig;
2717 p_sig = xmalloc (sizeof (*p_sig));
2718 p_sig->prev = lwp->pending_signals;
2719 p_sig->signal = lwp->resume->sig;
2720 memset (&p_sig->info, 0, sizeof (siginfo_t));
2721
2722 /* If this is the same signal we were previously stopped by,
2723 make sure to queue its siginfo. We can ignore the return
2724 value of ptrace; if it fails, we'll skip
2725 PTRACE_SETSIGINFO. */
2726 if (WIFSTOPPED (lwp->last_status)
2727 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
2728 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
2729
2730 lwp->pending_signals = p_sig;
2731 }
2732 }
2733
2734 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
2735 lwp->resume = NULL;
2736 return 0;
2737 }
2738
2739 static void
2740 linux_resume (struct thread_resume *resume_info, size_t n)
2741 {
2742 struct thread_resume_array array = { resume_info, n };
2743 struct lwp_info *need_step_over = NULL;
2744 int any_pending;
2745 int leave_all_stopped;
2746
2747 find_inferior (&all_threads, linux_set_resume_request, &array);
2748
2749 /* If there is a thread which would otherwise be resumed, which has
2750 a pending status, then don't resume any threads - we can just
2751 report the pending status. Make sure to queue any signals that
2752 would otherwise be sent. In non-stop mode, we'll apply this
2753 logic to each thread individually. We consume all pending events
2754 before considering to start a step-over (in all-stop). */
2755 any_pending = 0;
2756 if (!non_stop)
2757 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
2758
2759 /* If there is a thread which would otherwise be resumed, which is
2760 stopped at a breakpoint that needs stepping over, then don't
2761 resume any threads - have it step over the breakpoint with all
2762 other threads stopped, then resume all threads again. Make sure
2763 to queue any signals that would otherwise be delivered or
2764 queued. */
2765 if (!any_pending && supports_breakpoints ())
2766 need_step_over
2767 = (struct lwp_info *) find_inferior (&all_lwps,
2768 need_step_over_p, NULL);
2769
2770 leave_all_stopped = (need_step_over != NULL || any_pending);
2771
2772 if (debug_threads)
2773 {
2774 if (need_step_over != NULL)
2775 fprintf (stderr, "Not resuming all, need step over\n");
2776 else if (any_pending)
2777 fprintf (stderr,
2778 "Not resuming, all-stop and found "
2779 "an LWP with pending status\n");
2780 else
2781 fprintf (stderr, "Resuming, no pending status or step over needed\n");
2782 }
2783
2784 /* Even if we're leaving threads stopped, queue all signals we'd
2785 otherwise deliver. */
2786 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
2787
2788 if (need_step_over)
2789 start_step_over (need_step_over);
2790 }
2791
2792 /* This function is called once per thread. We check the thread's
2793 last resume request, which will tell us whether to resume, step, or
2794 leave the thread stopped. Any signal the client requested to be
2795 delivered has already been enqueued at this point.
2796
2797 If any thread that GDB wants running is stopped at an internal
2798 breakpoint that needs stepping over, we start a step-over operation
2799 on that particular thread, and leave all others stopped. */
2800
2801 static void
2802 proceed_one_lwp (struct inferior_list_entry *entry)
2803 {
2804 struct lwp_info *lwp;
2805 struct thread_info *thread;
2806 int step;
2807
2808 lwp = (struct lwp_info *) entry;
2809
2810 if (debug_threads)
2811 fprintf (stderr,
2812 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
2813
2814 if (!lwp->stopped)
2815 {
2816 if (debug_threads)
2817 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
2818 return;
2819 }
2820
2821 thread = get_lwp_thread (lwp);
2822
2823 if (thread->last_resume_kind == resume_stop)
2824 {
2825 if (debug_threads)
2826 fprintf (stderr, " client wants LWP %ld stopped\n", lwpid_of (lwp));
2827 return;
2828 }
2829
2830 if (lwp->status_pending_p)
2831 {
2832 if (debug_threads)
2833 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
2834 lwpid_of (lwp));
2835 return;
2836 }
2837
2838 if (lwp->suspended)
2839 {
2840 if (debug_threads)
2841 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
2842 return;
2843 }
2844
2845 step = thread->last_resume_kind == resume_step;
2846 linux_resume_one_lwp (lwp, step, 0, NULL);
2847 }
2848
2849 /* When we finish a step-over, set threads running again. If there's
2850 another thread that may need a step-over, now's the time to start
2851 it. Eventually, we'll move all threads past their breakpoints. */
2852
2853 static void
2854 proceed_all_lwps (void)
2855 {
2856 struct lwp_info *need_step_over;
2857
2858 /* If there is a thread which would otherwise be resumed, which is
2859 stopped at a breakpoint that needs stepping over, then don't
2860 resume any threads - have it step over the breakpoint with all
2861 other threads stopped, then resume all threads again. */
2862
2863 if (supports_breakpoints ())
2864 {
2865 need_step_over
2866 = (struct lwp_info *) find_inferior (&all_lwps,
2867 need_step_over_p, NULL);
2868
2869 if (need_step_over != NULL)
2870 {
2871 if (debug_threads)
2872 fprintf (stderr, "proceed_all_lwps: found "
2873 "thread %ld needing a step-over\n",
2874 lwpid_of (need_step_over));
2875
2876 start_step_over (need_step_over);
2877 return;
2878 }
2879 }
2880
2881 if (debug_threads)
2882 fprintf (stderr, "Proceeding, no step-over needed\n");
2883
2884 for_each_inferior (&all_lwps, proceed_one_lwp);
2885 }
2886
2887 /* Stopped LWPs that the client wanted to be running, that don't have
2888 pending statuses, are set to run again, except for EXCEPT, if not
2889 NULL. This undoes a stop_all_lwps call. */
2890
2891 static void
2892 unstop_all_lwps (struct lwp_info *except)
2893 {
2894 if (debug_threads)
2895 {
2896 if (except)
2897 fprintf (stderr,
2898 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
2899 else
2900 fprintf (stderr,
2901 "unstopping all lwps\n");
2902 }
2903
2904 /* Make sure proceed_one_lwp doesn't try to resume this thread. */
2905 if (except != NULL)
2906 ++except->suspended;
2907
2908 for_each_inferior (&all_lwps, proceed_one_lwp);
2909
2910 if (except != NULL)
2911 --except->suspended;
2912 }
2913
2914 #ifdef HAVE_LINUX_USRREGS
2915
2916 int
2917 register_addr (int regnum)
2918 {
2919 int addr;
2920
2921 if (regnum < 0 || regnum >= the_low_target.num_regs)
2922 error ("Invalid register number %d.", regnum);
2923
2924 addr = the_low_target.regmap[regnum];
2925
2926 return addr;
2927 }
2928
2929 /* Fetch one register. */
2930 static void
2931 fetch_register (struct regcache *regcache, int regno)
2932 {
2933 CORE_ADDR regaddr;
2934 int i, size;
2935 char *buf;
2936 int pid;
2937
2938 if (regno >= the_low_target.num_regs)
2939 return;
2940 if ((*the_low_target.cannot_fetch_register) (regno))
2941 return;
2942
2943 regaddr = register_addr (regno);
2944 if (regaddr == -1)
2945 return;
2946
2947 pid = lwpid_of (get_thread_lwp (current_inferior));
2948 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2949 & - sizeof (PTRACE_XFER_TYPE));
2950 buf = alloca (size);
2951 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2952 {
2953 errno = 0;
2954 *(PTRACE_XFER_TYPE *) (buf + i) =
2955 ptrace (PTRACE_PEEKUSER, pid,
2956 /* Coerce to a uintptr_t first to avoid potential gcc warning
2957 of coercing an 8 byte integer to a 4 byte pointer. */
2958 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
2959 regaddr += sizeof (PTRACE_XFER_TYPE);
2960 if (errno != 0)
2961 error ("reading register %d: %s", regno, strerror (errno));
2962 }
2963
2964 if (the_low_target.supply_ptrace_register)
2965 the_low_target.supply_ptrace_register (regcache, regno, buf);
2966 else
2967 supply_register (regcache, regno, buf);
2968 }
2969
2970 /* Fetch all registers, or just one, from the child process. */
2971 static void
2972 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
2973 {
2974 if (regno == -1)
2975 for (regno = 0; regno < the_low_target.num_regs; regno++)
2976 fetch_register (regcache, regno);
2977 else
2978 fetch_register (regcache, regno);
2979 }
2980
2981 /* Store our register values back into the inferior.
2982 If REGNO is -1, do this for all registers.
2983 Otherwise, REGNO specifies which register (so we can save time). */
2984 static void
2985 usr_store_inferior_registers (struct regcache *regcache, int regno)
2986 {
2987 CORE_ADDR regaddr;
2988 int i, size;
2989 char *buf;
2990 int pid;
2991
2992 if (regno >= 0)
2993 {
2994 if (regno >= the_low_target.num_regs)
2995 return;
2996
2997 if ((*the_low_target.cannot_store_register) (regno) == 1)
2998 return;
2999
3000 regaddr = register_addr (regno);
3001 if (regaddr == -1)
3002 return;
3003 errno = 0;
3004 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3005 & - sizeof (PTRACE_XFER_TYPE);
3006 buf = alloca (size);
3007 memset (buf, 0, size);
3008
3009 if (the_low_target.collect_ptrace_register)
3010 the_low_target.collect_ptrace_register (regcache, regno, buf);
3011 else
3012 collect_register (regcache, regno, buf);
3013
3014 pid = lwpid_of (get_thread_lwp (current_inferior));
3015 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3016 {
3017 errno = 0;
3018 ptrace (PTRACE_POKEUSER, pid,
3019 /* Coerce to a uintptr_t first to avoid potential gcc warning
3020 about coercing an 8 byte integer to a 4 byte pointer. */
3021 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3022 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
3023 if (errno != 0)
3024 {
3025 /* At this point, ESRCH should mean the process is
3026 already gone, in which case we simply ignore attempts
3027 to change its registers. See also the related
3028 comment in linux_resume_one_lwp. */
3029 if (errno == ESRCH)
3030 return;
3031
3032 if ((*the_low_target.cannot_store_register) (regno) == 0)
3033 error ("writing register %d: %s", regno, strerror (errno));
3034 }
3035 regaddr += sizeof (PTRACE_XFER_TYPE);
3036 }
3037 }
3038 else
3039 for (regno = 0; regno < the_low_target.num_regs; regno++)
3040 usr_store_inferior_registers (regcache, regno);
3041 }
3042 #endif /* HAVE_LINUX_USRREGS */
3043
3044
3045
3046 #ifdef HAVE_LINUX_REGSETS
3047
3048 static int
3049 regsets_fetch_inferior_registers (struct regcache *regcache)
3050 {
3051 struct regset_info *regset;
3052 int saw_general_regs = 0;
3053 int pid;
3054 struct iovec iov;
3055
3056 regset = target_regsets;
3057
3058 pid = lwpid_of (get_thread_lwp (current_inferior));
3059 while (regset->size >= 0)
3060 {
3061 void *buf, *data;
3062 int nt_type, res;
3063
3064 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3065 {
3066 regset ++;
3067 continue;
3068 }
3069
3070 buf = xmalloc (regset->size);
3071
3072 nt_type = regset->nt_type;
3073 if (nt_type)
3074 {
3075 iov.iov_base = buf;
3076 iov.iov_len = regset->size;
3077 data = (void *) &iov;
3078 }
3079 else
3080 data = buf;
3081
3082 #ifndef __sparc__
3083 res = ptrace (regset->get_request, pid, nt_type, data);
3084 #else
3085 res = ptrace (regset->get_request, pid, data, nt_type);
3086 #endif
3087 if (res < 0)
3088 {
3089 if (errno == EIO)
3090 {
3091 /* If we get EIO on a regset, do not try it again for
3092 this process. */
3093 disabled_regsets[regset - target_regsets] = 1;
3094 free (buf);
3095 continue;
3096 }
3097 else
3098 {
3099 char s[256];
3100 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3101 pid);
3102 perror (s);
3103 }
3104 }
3105 else if (regset->type == GENERAL_REGS)
3106 saw_general_regs = 1;
3107 regset->store_function (regcache, buf);
3108 regset ++;
3109 free (buf);
3110 }
3111 if (saw_general_regs)
3112 return 0;
3113 else
3114 return 1;
3115 }
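/* Standalone sketch (not gdbserver code) of the iovec-style regset
   read used above.  This assumes a kernel new enough to provide
   PTRACE_GETREGSET (2.6.34 or later); older kernels only offer the
   fixed-layout requests such as PTRACE_GETREGS.  */
#if 0
#include <elf.h>		/* NT_PRSTATUS */

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

static long
demo_getregset (int pid, void *buf, size_t size)
{
  struct iovec iov;

  iov.iov_base = buf;
  iov.iov_len = size;
  /* On success the kernel updates iov.iov_len to the number of
     bytes actually written to BUF.  */
  return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
#endif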
3116
3117 static int
3118 regsets_store_inferior_registers (struct regcache *regcache)
3119 {
3120 struct regset_info *regset;
3121 int saw_general_regs = 0;
3122 int pid;
3123 struct iovec iov;
3124
3125 regset = target_regsets;
3126
3127 pid = lwpid_of (get_thread_lwp (current_inferior));
3128 while (regset->size >= 0)
3129 {
3130 void *buf, *data;
3131 int nt_type, res;
3132
3133 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3134 {
3135 regset ++;
3136 continue;
3137 }
3138
3139 buf = xmalloc (regset->size);
3140
3141 /* First fill the buffer with the current register set contents,
3142 in case there are any items in the kernel's regset that are
3143 not in gdbserver's regcache. */
3144
3145 nt_type = regset->nt_type;
3146 if (nt_type)
3147 {
3148 iov.iov_base = buf;
3149 iov.iov_len = regset->size;
3150 data = (void *) &iov;
3151 }
3152 else
3153 data = buf;
3154
3155 #ifndef __sparc__
3156 res = ptrace (regset->get_request, pid, nt_type, data);
3157 #else
3158 res = ptrace (regset->get_request, pid, data, nt_type);
3159 #endif
3160
3161 if (res == 0)
3162 {
3163 /* Then overlay our cached registers on that. */
3164 regset->fill_function (regcache, buf);
3165
3166 /* Only now do we write the register set. */
3167 #ifndef __sparc__
3168 res = ptrace (regset->set_request, pid, nt_type, data);
3169 #else
3170 res = ptrace (regset->set_request, pid, data, nt_type);
3171 #endif
3172 }
3173
3174 if (res < 0)
3175 {
3176 if (errno == EIO)
3177 {
3178 /* If we get EIO on a regset, do not try it again for
3179 this process. */
3180 disabled_regsets[regset - target_regsets] = 1;
3181 free (buf);
3182 continue;
3183 }
3184 else if (errno == ESRCH)
3185 {
3186 /* At this point, ESRCH should mean the process is
3187 already gone, in which case we simply ignore attempts
3188 to change its registers. See also the related
3189 comment in linux_resume_one_lwp. */
3190 free (buf);
3191 return 0;
3192 }
3193 else
3194 {
3195 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3196 }
3197 }
3198 else if (regset->type == GENERAL_REGS)
3199 saw_general_regs = 1;
3200 regset ++;
3201 free (buf);
3202 }
3203 if (saw_general_regs)
3204 return 0;
3205 else
3206 return 1;
3208 }
3209
3210 #endif /* HAVE_LINUX_REGSETS */
3211
3212
3213 void
3214 linux_fetch_registers (struct regcache *regcache, int regno)
3215 {
3216 #ifdef HAVE_LINUX_REGSETS
3217 if (regsets_fetch_inferior_registers (regcache) == 0)
3218 return;
3219 #endif
3220 #ifdef HAVE_LINUX_USRREGS
3221 usr_fetch_inferior_registers (regcache, regno);
3222 #endif
3223 }
3224
3225 void
3226 linux_store_registers (struct regcache *regcache, int regno)
3227 {
3228 #ifdef HAVE_LINUX_REGSETS
3229 if (regsets_store_inferior_registers (regcache) == 0)
3230 return;
3231 #endif
3232 #ifdef HAVE_LINUX_USRREGS
3233 usr_store_inferior_registers (regcache, regno);
3234 #endif
3235 }
3236
3237
3238 /* Copy LEN bytes from inferior's memory starting at MEMADDR
3239 to debugger memory starting at MYADDR. */
3240
3241 static int
3242 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
3243 {
3244 register int i;
3245 /* Round starting address down to longword boundary. */
3246 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3247 /* Round ending address up; get number of longwords that makes. */
3248 register int count
3249 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
3250 / sizeof (PTRACE_XFER_TYPE);
3251 /* Allocate buffer of that many longwords. */
3252 register PTRACE_XFER_TYPE *buffer
3253 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3254 int fd;
3255 char filename[64];
3256 int pid = lwpid_of (get_thread_lwp (current_inferior));
3257
3258 /* Try using /proc. Don't bother for one word. */
3259 if (len >= 3 * sizeof (long))
3260 {
3261 /* We could keep this file open and cache it - possibly one per
3262 thread. That requires some juggling, but is even faster. */
3263 sprintf (filename, "/proc/%d/mem", pid);
3264 fd = open (filename, O_RDONLY | O_LARGEFILE);
3265 if (fd == -1)
3266 goto no_proc;
3267
3268 /* If pread64 is available, use it. It's faster if the kernel
3269 supports it (only one syscall), and it's 64-bit safe even on
3270 32-bit platforms (for instance, SPARC debugging a SPARC64
3271 application). */
3272 #ifdef HAVE_PREAD64
3273 if (pread64 (fd, myaddr, len, memaddr) != len)
3274 #else
3275 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
3276 #endif
3277 {
3278 close (fd);
3279 goto no_proc;
3280 }
3281
3282 close (fd);
3283 return 0;
3284 }
3285
3286 no_proc:
3287 /* Read all the longwords */
3288 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3289 {
3290 errno = 0;
3291 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3292 about coercing an 8 byte integer to a 4 byte pointer. */
3293 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
3294 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3295 if (errno)
3296 return errno;
3297 }
3298
3299 /* Copy appropriate bytes out of the buffer. */
3300 memcpy (myaddr,
3301 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
3302 len);
3303
3304 return 0;
3305 }
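/* Standalone sketch (not gdbserver code) of the /proc fast path
   used above.  The target must already be ptrace-stopped by the
   caller for the read to be allowed.  */
#if 0
static int
demo_read_mem (int pid, unsigned long addr, void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  sprintf (filename, "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return -1;
  /* A single pread64 is one syscall and is 64-bit safe even on
     32-bit hosts; the real code above falls back to lseek+read when
     pread64 is unavailable.  */
  n = pread64 (fd, buf, len, addr);
  close (fd);
  return n == (ssize_t) len ? 0 : -1;
}
#endif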
3306
3307 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3308 memory at MEMADDR. On failure (cannot write to the inferior)
3309 returns the value of errno. */
3310
3311 static int
3312 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
3313 {
3314 register int i;
3315 /* Round starting address down to longword boundary. */
3316 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3317 /* Round ending address up; get number of longwords that makes. */
3318 register int count
3319 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
3320 /* Allocate buffer of that many longwords. */
3321 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3322 int pid = lwpid_of (get_thread_lwp (current_inferior));
3323
3324 if (debug_threads)
3325 {
3326 /* Dump up to four bytes. */
3327 unsigned int val = * (unsigned int *) myaddr;
3328 if (len == 1)
3329 val = val & 0xff;
3330 else if (len == 2)
3331 val = val & 0xffff;
3332 else if (len == 3)
3333 val = val & 0xffffff;
3334 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
3335 val, (long)memaddr);
3336 }
3337
3338 /* Fill start and end extra bytes of buffer with existing memory data. */
3339
3340 errno = 0;
3341 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3342 about coercing an 8 byte integer to a 4 byte pointer. */
3343 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
3344 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3345 if (errno)
3346 return errno;
3347
3348 if (count > 1)
3349 {
3350 errno = 0;
3351 buffer[count - 1]
3352 = ptrace (PTRACE_PEEKTEXT, pid,
3353 /* Coerce to a uintptr_t first to avoid potential gcc warning
3354 about coercing an 8 byte integer to a 4 byte pointer. */
3355 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
3356 * sizeof (PTRACE_XFER_TYPE)),
3357 0);
3358 if (errno)
3359 return errno;
3360 }
3361
3362 /* Copy data to be written over corresponding part of buffer. */
3363
3364 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
3365
3366 /* Write the entire buffer. */
3367
3368 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3369 {
3370 errno = 0;
3371 ptrace (PTRACE_POKETEXT, pid,
3372 /* Coerce to a uintptr_t first to avoid potential gcc warning
3373 about coercing an 8 byte integer to a 4 byte pointer. */
3374 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
3375 (PTRACE_ARG4_TYPE) buffer[i]);
3376 if (errno)
3377 return errno;
3378 }
3379
3380 return 0;
3381 }
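/* Worked example of the word-rounding used by linux_read_memory and
   linux_write_memory, assuming a 4-byte PTRACE_XFER_TYPE: writing
   LEN = 5 bytes at MEMADDR = 0x1002 gives addr = 0x1002 & ~3 =
   0x1000, and count = ((0x1007 - 0x1000) + 3) / 4 = 2 words, so the
   read-modify-write covers 0x1000..0x1007 and preserves the existing
   bytes at 0x1000-0x1001 and 0x1007.  */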
3382
3383 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
3384 static int linux_supports_tracefork_flag;
3385
3386 /* Helper functions for linux_test_for_tracefork, called via clone (). */
3387
3388 static int
3389 linux_tracefork_grandchild (void *arg)
3390 {
3391 _exit (0);
3392 }
3393
3394 #define STACK_SIZE 4096
3395
3396 static int
3397 linux_tracefork_child (void *arg)
3398 {
3399 ptrace (PTRACE_TRACEME, 0, 0, 0);
3400 kill (getpid (), SIGSTOP);
3401
3402 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3403
3404 if (fork () == 0)
3405 linux_tracefork_grandchild (NULL);
3406
3407 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3408
3409 #ifdef __ia64__
3410 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
3411 CLONE_VM | SIGCHLD, NULL);
3412 #else
3413 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
3414 CLONE_VM | SIGCHLD, NULL);
3415 #endif
3416
3417 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3418
3419 _exit (0);
3420 }
3421
3422 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
3423 sure that we can enable the option, and that it had the desired
3424 effect. */
3425
3426 static void
3427 linux_test_for_tracefork (void)
3428 {
3429 int child_pid, ret, status;
3430 long second_pid;
3431 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3432 char *stack = xmalloc (STACK_SIZE * 4);
3433 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3434
3435 linux_supports_tracefork_flag = 0;
3436
3437 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3438
3439 child_pid = fork ();
3440 if (child_pid == 0)
3441 linux_tracefork_child (NULL);
3442
3443 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3444
3445 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
3446 #ifdef __ia64__
3447 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
3448 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3449 #else /* !__ia64__ */
3450 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
3451 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3452 #endif /* !__ia64__ */
3453
3454 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3455
3456 if (child_pid == -1)
3457 perror_with_name ("clone");
3458
3459 ret = my_waitpid (child_pid, &status, 0);
3460 if (ret == -1)
3461 perror_with_name ("waitpid");
3462 else if (ret != child_pid)
3463 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
3464 if (! WIFSTOPPED (status))
3465 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
3466
3467 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
3468 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
3469 if (ret != 0)
3470 {
3471 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3472 if (ret != 0)
3473 {
3474 warning ("linux_test_for_tracefork: failed to kill child");
3475 return;
3476 }
3477
3478 ret = my_waitpid (child_pid, &status, 0);
3479 if (ret != child_pid)
3480 warning ("linux_test_for_tracefork: failed to wait for killed child");
3481 else if (!WIFSIGNALED (status))
3482 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
3483 "killed child", status);
3484
3485 return;
3486 }
3487
3488 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
3489 if (ret != 0)
3490 warning ("linux_test_for_tracefork: failed to resume child");
3491
3492 ret = my_waitpid (child_pid, &status, 0);
3493
3494 if (ret == child_pid && WIFSTOPPED (status)
3495 && status >> 16 == PTRACE_EVENT_FORK)
3496 {
3497 second_pid = 0;
3498 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
3499 if (ret == 0 && second_pid != 0)
3500 {
3501 int second_status;
3502
3503 linux_supports_tracefork_flag = 1;
3504 my_waitpid (second_pid, &second_status, 0);
3505 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
3506 if (ret != 0)
3507 warning ("linux_test_for_tracefork: failed to kill second child");
3508 my_waitpid (second_pid, &status, 0);
3509 }
3510 }
3511 else
3512 warning ("linux_test_for_tracefork: unexpected result from waitpid "
3513 "(%d, status 0x%x)", ret, status);
3514
3515 do
3516 {
3517 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3518 if (ret != 0)
3519 warning ("linux_test_for_tracefork: failed to kill child");
3520 my_waitpid (child_pid, &status, 0);
3521 }
3522 while (WIFSTOPPED (status));
3523
3524 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3525 free (stack);
3526 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3527 }
3528
3529
3530 static void
3531 linux_look_up_symbols (void)
3532 {
3533 #ifdef USE_THREAD_DB
3534 struct process_info *proc = current_process ();
3535
3536 if (proc->private->thread_db != NULL)
3537 return;
3538
3539 /* If the kernel supports tracing forks then it also supports tracing
3540 clones, and then we don't need to use the magic thread event breakpoint
3541 to learn about threads. */
3542 thread_db_init (!linux_supports_tracefork_flag);
3543 #endif
3544 }
3545
3546 static void
3547 linux_request_interrupt (void)
3548 {
3549 extern unsigned long signal_pid;
3550
3551 if (!ptid_equal (cont_thread, null_ptid)
3552 && !ptid_equal (cont_thread, minus_one_ptid))
3553 {
3554 struct lwp_info *lwp;
3555 int lwpid;
3556
3557 lwp = get_thread_lwp (current_inferior);
3558 lwpid = lwpid_of (lwp);
3559 kill_lwp (lwpid, SIGINT);
3560 }
3561 else
3562 kill_lwp (signal_pid, SIGINT);
3563 }
3564
3565 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
3566 to debugger memory starting at MYADDR. */
3567
3568 static int
3569 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
3570 {
3571 char filename[PATH_MAX];
3572 int fd, n;
3573 int pid = lwpid_of (get_thread_lwp (current_inferior));
3574
3575 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
3576
3577 fd = open (filename, O_RDONLY);
3578 if (fd < 0)
3579 return -1;
3580
3581 if (offset != (CORE_ADDR) 0
3582 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3583 n = -1;
3584 else
3585 n = read (fd, myaddr, len);
3586
3587 close (fd);
3588
3589 return n;
3590 }
3591
3592 /* These breakpoint and watchpoint related wrapper functions simply
3593 pass on the function call if the target has registered a
3594 corresponding function. */
3595
3596 static int
3597 linux_insert_point (char type, CORE_ADDR addr, int len)
3598 {
3599 if (the_low_target.insert_point != NULL)
3600 return the_low_target.insert_point (type, addr, len);
3601 else
3602 /* Unsupported (see target.h). */
3603 return 1;
3604 }
3605
3606 static int
3607 linux_remove_point (char type, CORE_ADDR addr, int len)
3608 {
3609 if (the_low_target.remove_point != NULL)
3610 return the_low_target.remove_point (type, addr, len);
3611 else
3612 /* Unsupported (see target.h). */
3613 return 1;
3614 }
3615
3616 static int
3617 linux_stopped_by_watchpoint (void)
3618 {
3619 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3620
3621 return lwp->stopped_by_watchpoint;
3622 }
3623
3624 static CORE_ADDR
3625 linux_stopped_data_address (void)
3626 {
3627 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3628
3629 return lwp->stopped_data_address;
3630 }
3631
3632 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3633 #if defined(__mcoldfire__)
3634 /* These should really be defined in the kernel's ptrace.h header. */
3635 #define PT_TEXT_ADDR 49*4
3636 #define PT_DATA_ADDR 50*4
3637 #define PT_TEXT_END_ADDR 51*4
3638 #endif
3639
3640 /* Under uClinux, programs are loaded at non-zero offsets, which we need
3641 to tell gdb about. */
3642
3643 static int
3644 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
3645 {
3646 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
3647 unsigned long text, text_end, data;
3648 int pid = lwpid_of (get_thread_lwp (current_inferior));
3649
3650 errno = 0;
3651
3652 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
3653 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
3654 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
3655
3656 if (errno == 0)
3657 {
3658 /* Both text and data offsets produced at compile-time (and so
3659 used by gdb) are relative to the beginning of the program,
3660 with the data segment immediately following the text segment.
3661 However, the actual runtime layout in memory may put the data
3662 somewhere else, so when we send gdb a data base-address, we
3663 use the real data base address and subtract the compile-time
3664 data base-address from it (which is just the length of the
3665 text segment). BSS immediately follows data in both
3666 cases. */
3667 *text_p = text;
3668 *data_p = data - (text_end - text);
3669
3670 return 1;
3671 }
3672 #endif
3673 return 0;
3674 }
3675 #endif
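/* Worked example with hypothetical numbers: if the text segment was
   loaded at 0x8000 with text_end = 0x9000, and the data segment at
   0x20000, we report *text_p = 0x8000 and
   *data_p = 0x20000 - (0x9000 - 0x8000) = 0x1f000, so that gdb's
   compile-time data offset (which equals the text size) relocates
   data symbols to their true runtime addresses.  */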
3676
3677 static int
3678 compare_ints (const void *xa, const void *xb)
3679 {
3680 int a = *(const int *)xa;
3681 int b = *(const int *)xb;
3682
3683 return a - b;
3684 }
3685
3686 static int *
3687 unique (int *b, int *e)
3688 {
3689 int *d = b;
3690 while (++b != e)
3691 if (*d != *b)
3692 *++d = *b;
3693 return ++d;
3694 }
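/* Hypothetical usage of the two helpers above: sort first, then drop
   adjacent duplicates.  */
#if 0
  int v[5] = { 3, 1, 3, 2, 1 };
  int *end;

  qsort (v, 5, sizeof (int), compare_ints);	/* 1 1 2 3 3 */
  end = unique (v, v + 5);			/* 1 2 3 */
  /* Now end - v == 3.  */
#endif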
3695
3696 /* Given PID, iterates over all threads in that process.
3697
3698 Information about each thread, in a format suitable for qXfer:osdata:thread
3699 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
3700 initialized, and the caller is responsible for finishing and appending '\0'
3701 to it.
3702
3703 The list of cores that threads are running on is assigned to *CORES, if it
3704 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
3705 should free *CORES. */
3706
3707 static void
3708 list_threads (int pid, struct buffer *buffer, char **cores)
3709 {
3710 int count = 0;
3711 int allocated = 10;
3712 int *core_numbers = xmalloc (sizeof (int) * allocated);
3713 char pathname[128];
3714 DIR *dir;
3715 struct dirent *dp;
3716 struct stat statbuf;
3717
3718 sprintf (pathname, "/proc/%d/task", pid);
3719 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
3720 {
3721 dir = opendir (pathname);
3722 if (!dir)
3723 {
3724 free (core_numbers);
3725 return;
3726 }
3727
3728 while ((dp = readdir (dir)) != NULL)
3729 {
3730 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
3731
3732 if (lwp != 0)
3733 {
3734 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
3735
3736 if (core != -1)
3737 {
3738 char s[sizeof ("4294967295")];
3739 sprintf (s, "%u", core);
3740
3741 if (count == allocated)
3742 {
3743 allocated *= 2;
3744 core_numbers = xrealloc (core_numbers,
3745 sizeof (int) * allocated);
3746 }
3747 core_numbers[count++] = core;
3748 if (buffer)
3749 buffer_xml_printf (buffer,
3750 "<item>"
3751 "<column name=\"pid\">%d</column>"
3752 "<column name=\"tid\">%s</column>"
3753 "<column name=\"core\">%s</column>"
3754 "</item>", pid, dp->d_name, s);
3755 }
3756 else
3757 {
3758 if (buffer)
3759 buffer_xml_printf (buffer,
3760 "<item>"
3761 "<column name=\"pid\">%d</column>"
3762 "<column name=\"tid\">%s</column>"
3763 "</item>", pid, dp->d_name);
3764 }
3765 }
3766 }
3767 }
3768
3769 if (cores)
3770 {
3771 *cores = NULL;
3772 if (count > 0)
3773 {
3774 struct buffer buffer2;
3775 int *b;
3776 int *e;
3777 qsort (core_numbers, count, sizeof (int), compare_ints);
3778
3779 /* Remove duplicates. */
3780 b = core_numbers;
3781 e = unique (b, core_numbers + count);
3782
3783 buffer_init (&buffer2);
3784
3785 for (b = core_numbers; b != e; ++b)
3786 {
3787 char number[sizeof ("4294967295")];
3788 sprintf (number, "%u", *b);
3789 buffer_xml_printf (&buffer2, "%s%s",
3790 (b == core_numbers) ? "" : ",", number);
3791 }
3792 buffer_grow_str0 (&buffer2, "");
3793
3794 *cores = buffer_finish (&buffer2);
3795 }
3796 }
3797 free (core_numbers);
3798 }
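/* For reference, a hypothetical fragment of the XML that
   list_threads emits for a process 1234 whose main thread was last
   seen on core 0:

     <item>
       <column name="pid">1234</column>
       <column name="tid">1234</column>
       <column name="core">0</column>
     </item>
*/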
3799
3800 static void
3801 show_process (int pid, const char *username, struct buffer *buffer)
3802 {
3803 char pathname[128];
3804 FILE *f;
3805 char cmd[MAXPATHLEN + 1];
3806
3807 sprintf (pathname, "/proc/%d/cmdline", pid);
3808
3809 if ((f = fopen (pathname, "r")) != NULL)
3810 {
3811 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
3812 if (len > 0)
3813 {
3814 char *cores = 0;
3815 int i;
3816 for (i = 0; i < len; i++)
3817 if (cmd[i] == '\0')
3818 cmd[i] = ' ';
3819 cmd[len] = '\0';
3820
3821 buffer_xml_printf (buffer,
3822 "<item>"
3823 "<column name=\"pid\">%d</column>"
3824 "<column name=\"user\">%s</column>"
3825 "<column name=\"command\">%s</column>",
3826 pid,
3827 username,
3828 cmd);
3829
3830 /* This only collects core numbers, and does not print threads. */
3831 list_threads (pid, NULL, &cores);
3832
3833 if (cores)
3834 {
3835 buffer_xml_printf (buffer,
3836 "<column name=\"cores\">%s</column>", cores);
3837 free (cores);
3838 }
3839
3840 buffer_xml_printf (buffer, "</item>");
3841 }
3842 fclose (f);
3843 }
3844 }
3845
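/* Handle qXfer:osdata reads.  ANNEX selects "processes" or "threads".
   The whole XML document is built when a read starts at offset 0, and
   subsequent reads are served from that snapshot.  A processes
   document looks like this (contents illustrative):

     <osdata type="processes">
       <item>
	 <column name="pid">1</column>
	 <column name="user">root</column>
	 <column name="command">/sbin/init</column>
	 <column name="cores">0</column>
       </item>
       ...
     </osdata>  */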
3846 static int
3847 linux_qxfer_osdata (const char *annex,
3848 unsigned char *readbuf, unsigned const char *writebuf,
3849 CORE_ADDR offset, int len)
3850 {
3851 /* We take a snapshot of the process list when a read of the
3852 object starts, and serve every chunk from that snapshot. */
3853 static const char *buf;
3854 static long len_avail = -1;
3855 static struct buffer buffer;
3856 int processes = 0;
3857 int threads = 0;
3858
3859 DIR *dirp;
3860
3861 if (strcmp (annex, "processes") == 0)
3862 processes = 1;
3863 else if (strcmp (annex, "threads") == 0)
3864 threads = 1;
3865 else
3866 return 0;
3867
3868 if (!readbuf || writebuf)
3869 return 0;
3870
3871 if (offset == 0)
3872 {
3873 if (len_avail != -1 && len_avail != 0)
3874 buffer_free (&buffer);
3875 len_avail = 0;
3876 buf = NULL;
3877 buffer_init (&buffer);
3878 if (processes)
3879 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
3880 else if (threads)
3881 buffer_grow_str (&buffer, "<osdata type=\"threads\">");
3882
3883 dirp = opendir ("/proc");
3884 if (dirp)
3885 {
3886 struct dirent *dp;
3887 while ((dp = readdir (dirp)) != NULL)
3888 {
3889 struct stat statbuf;
3890 char procentry[sizeof ("/proc/4294967295")];
3891
3892 if (!isdigit ((unsigned char) dp->d_name[0])
3893 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
3894 continue;
3895
3896 sprintf (procentry, "/proc/%s", dp->d_name);
3897 if (stat (procentry, &statbuf) == 0
3898 && S_ISDIR (statbuf.st_mode))
3899 {
3900 int pid = (int) strtoul (dp->d_name, NULL, 10);
3901
3902 if (processes)
3903 {
3904 struct passwd *entry = getpwuid (statbuf.st_uid);
3905 show_process (pid, entry ? entry->pw_name : "?", &buffer);
3906 }
3907 else if (threads)
3908 {
3909 list_threads (pid, &buffer, NULL);
3910 }
3911 }
3912 }
3913
3914 closedir (dirp);
3915 }
3916 buffer_grow_str0 (&buffer, "</osdata>\n");
3917 buf = buffer_finish (&buffer);
3918 len_avail = strlen (buf);
3919 }
3920
3921 if (offset >= len_avail)
3922 {
3923 /* Done. Get rid of the data. */
3924 buffer_free (&buffer);
3925 buf = NULL;
3926 len_avail = 0;
3927 return 0;
3928 }
3929
3930 if (len > len_avail - offset)
3931 len = len_avail - offset;
3932 memcpy (readbuf, buf + offset, len);
3933
3934 return len;
3935 }
3936
3937 /* Convert a siginfo object between the native/host layout (SIGINFO)
3938 and the layout of the inferior's architecture (INF_SIGINFO).
     DIRECTION 1 fills SIGINFO from INF_SIGINFO; 0 does the reverse.  */
3939
3940 static void
3941 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
3942 {
3943 int done = 0;
3944
3945 if (the_low_target.siginfo_fixup != NULL)
3946 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
3947
3948 /* If there was no callback, or the callback didn't do anything,
3949 then just do a straight memcpy. */
3950 if (!done)
3951 {
3952 if (direction == 1)
3953 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3954 else
3955 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3956 }
3957 }
3958
3959 static int
3960 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
3961 unsigned const char *writebuf, CORE_ADDR offset, int len)
3962 {
3963 int pid;
3964 struct siginfo siginfo;
3965 char inf_siginfo[sizeof (struct siginfo)];
3966
3967 if (current_inferior == NULL)
3968 return -1;
3969
3970 pid = lwpid_of (get_thread_lwp (current_inferior));
3971
3972 if (debug_threads)
3973 fprintf (stderr, "%s siginfo for lwp %d.\n",
3974 readbuf != NULL ? "Reading" : "Writing",
3975 pid);
3976
3977 if (offset > sizeof (siginfo))
3978 return -1;
3979
3980 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
3981 return -1;
3982
3983 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
3984 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3985 inferior with a 64-bit GDBSERVER should look the same as debugging it
3986 with a 32-bit GDBSERVER, we need to convert it. */
3987 siginfo_fixup (&siginfo, inf_siginfo, 0);
3988
3989 if (offset + len > sizeof (siginfo))
3990 len = sizeof (siginfo) - offset;
3991
3992 if (readbuf != NULL)
3993 memcpy (readbuf, inf_siginfo + offset, len);
3994 else
3995 {
3996 memcpy (inf_siginfo + offset, writebuf, len);
3997
3998 /* Convert back to ptrace layout before flushing it out. */
3999 siginfo_fixup (&siginfo, inf_siginfo, 1);
4000
4001 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4002 return -1;
4003 }
4004
4005 return len;
4006 }
4007
4008 /* SIGCHLD handler that serves two purposes: in non-stop/async mode it
4009 notifies the event loop that children have changed state, and it also
4010 acts as the handler for the sigsuspend in my_waitpid.  */
4011
4012 static void
4013 sigchld_handler (int signo)
4014 {
4015 int old_errno = errno;
4016
4017 if (debug_threads)
4018 /* fprintf is not async-signal-safe, so call write directly. */
4019 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
4020
4021 if (target_is_async_p ())
4022 async_file_mark (); /* trigger a linux_wait */
4023
4024 errno = old_errno;
4025 }
4026
4027 static int
4028 linux_supports_non_stop (void)
4029 {
4030 return 1;
4031 }
4032
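/* Enable or disable async mode.  Enabling creates the event pipe (a
   self-pipe that sigchld_handler fills via async_file_mark) and
   registers its read end with the event loop; disabling unregisters
   and closes it.  SIGCHLD is blocked around the switch so the handler
   never observes the pipe in a half-initialized state.  Returns the
   previous setting.  */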
4033 static int
4034 linux_async (int enable)
4035 {
4036 int previous = (linux_event_pipe[0] != -1);
4037
4038 if (debug_threads)
4039 fprintf (stderr, "linux_async (%d), previous=%d\n",
4040 enable, previous);
4041
4042 if (previous != enable)
4043 {
4044 sigset_t mask;
4045 sigemptyset (&mask);
4046 sigaddset (&mask, SIGCHLD);
4047
4048 sigprocmask (SIG_BLOCK, &mask, NULL);
4049
4050 if (enable)
4051 {
4052 if (pipe (linux_event_pipe) == -1)
4053 fatal ("creating event pipe failed.");
4054
4055 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4056 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4057
4058 /* Register the event loop handler. */
4059 add_file_handler (linux_event_pipe[0],
4060 handle_target_event, NULL);
4061
4062 /* Always trigger a linux_wait. */
4063 async_file_mark ();
4064 }
4065 else
4066 {
4067 delete_file_handler (linux_event_pipe[0]);
4068
4069 close (linux_event_pipe[0]);
4070 close (linux_event_pipe[1]);
4071 linux_event_pipe[0] = -1;
4072 linux_event_pipe[1] = -1;
4073 }
4074
4075 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4076 }
4077
4078 return previous;
4079 }
4080
4081 static int
4082 linux_start_non_stop (int nonstop)
4083 {
4084 /* Register or unregister from event-loop accordingly. */
4085 linux_async (nonstop);
4086 return 0;
4087 }
4088
4089 static int
4090 linux_supports_multi_process (void)
4091 {
4092 return 1;
4093 }
4094
4095
4096 /* Enumerate spufs IDs for process PID: scan /proc/PID/fd for descriptors
     referring to spufs contexts and store their fd numbers as 32-bit IDs
     into BUF, honoring the OFFSET/LEN window.  Returns the bytes written.  */
4097 static int
4098 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4099 {
4100 int pos = 0;
4101 int written = 0;
4102 char path[128];
4103 DIR *dir;
4104 struct dirent *entry;
4105
4106 sprintf (path, "/proc/%ld/fd", pid);
4107 dir = opendir (path);
4108 if (!dir)
4109 return -1;
4110
4111 rewinddir (dir);
4112 while ((entry = readdir (dir)) != NULL)
4113 {
4114 struct stat st;
4115 struct statfs stfs;
4116 int fd;
4117
4118 fd = atoi (entry->d_name);
4119 if (!fd)
4120 continue;
4121
4122 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4123 if (stat (path, &st) != 0)
4124 continue;
4125 if (!S_ISDIR (st.st_mode))
4126 continue;
4127
4128 if (statfs (path, &stfs) != 0)
4129 continue;
4130 if (stfs.f_type != SPUFS_MAGIC)
4131 continue;
4132
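      /* Each spufs context is reported as one 32-bit ID (its fd
	 number); copy out only IDs falling entirely within the
	 requested [OFFSET, OFFSET + LEN) window.  */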
4133 if (pos >= offset && pos + 4 <= offset + len)
4134 {
4135 *(unsigned int *)(buf + pos - offset) = fd;
4136 written += 4;
4137 }
4138 pos += 4;
4139 }
4140
4141 closedir (dir);
4142 return written;
4143 }
4144
4145 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4146 object type, using the /proc file system. */
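/* An empty annex requests the list of SPU context IDs.  Otherwise the
   annex names a file within a context, resolved relative to
   /proc/PID/fd, for instance "7/regs" (example name only).  */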
4147 static int
4148 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4149 unsigned const char *writebuf,
4150 CORE_ADDR offset, int len)
4151 {
4152 long pid = lwpid_of (get_thread_lwp (current_inferior));
4153 char buf[128];
4154 int fd = 0;
4155 int ret = 0;
4156
4157 if (!writebuf && !readbuf)
4158 return -1;
4159
4160 if (!*annex)
4161 {
4162 if (!readbuf)
4163 return -1;
4164 else
4165 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4166 }
4167
4168 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4169 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
4170 if (fd < 0)
4171 return -1;
4172
4173 if (offset != 0
4174 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4175 {
4176 close (fd);
4177 return 0;
4178 }
4179
4180 if (writebuf)
4181 ret = write (fd, writebuf, (size_t) len);
4182 else
4183 ret = read (fd, readbuf, (size_t) len);
4184
4185 close (fd);
4186 return ret;
4187 }
4188
4189 static int
4190 linux_core_of_thread (ptid_t ptid)
4191 {
4192 char filename[sizeof ("/proc//task//stat")
4193 + 2 * 20 /* 20 decimal digits for each of two numbers of at most 64 bits */
4194 + 1];
4195 FILE *f;
4196 char *content = NULL;
4197 char *p;
4198 char *ts = 0;
4199 int content_read = 0;
4200 int i;
4201 int core;
4202
4203 sprintf (filename, "/proc/%d/task/%ld/stat",
4204 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4205 f = fopen (filename, "r");
4206 if (!f)
4207 return -1;
4208
4209 for (;;)
4210 {
4211 int n;
4212 content = realloc (content, content_read + 1024);
4213 n = fread (content + content_read, 1, 1024, f);
4214 content_read += n;
4215 if (n < 1024)
4216 {
4217 content[content_read] = '\0';
4218 break;
4219 }
4220 }
4221
4222 p = strchr (content, '(');
4223 p = strchr (p, ')') + 2; /* skip ")" and the following space. */
4224
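  /* The CPU a task last ran on is field 39 of /proc/PID/task/LWP/stat
     (see proc(5)).  After skipping past ")" we stand at field 3, so
     consume that token and then 36 more.  */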
4225 p = strtok_r (p, " ", &ts);
4226 for (i = 0; i != 36; ++i)
4227 p = strtok_r (NULL, " ", &ts);
4228
4229 if (p == NULL || sscanf (p, "%d", &core) != 1)
4230 core = -1;
4231
4232 free (content);
4233 fclose (f);
4234
4235 return core;
4236 }
4237
4238 static void
4239 linux_process_qsupported (const char *query)
4240 {
4241 if (the_low_target.process_qsupported != NULL)
4242 the_low_target.process_qsupported (query);
4243 }
4244
4245 static int
4246 linux_supports_tracepoints (void)
4247 {
4248 if (the_low_target.supports_tracepoints == NULL)
4249 return 0;
4250
4251 return (*the_low_target.supports_tracepoints) ();
4252 }
4253
4254 static CORE_ADDR
4255 linux_read_pc (struct regcache *regcache)
4256 {
4257 if (the_low_target.get_pc == NULL)
4258 return 0;
4259
4260 return (*the_low_target.get_pc) (regcache);
4261 }
4262
4263 static void
4264 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4265 {
4266 gdb_assert (the_low_target.set_pc != NULL);
4267
4268 (*the_low_target.set_pc) (regcache, pc);
4269 }
4270
4271 static int
4272 linux_thread_stopped (struct thread_info *thread)
4273 {
4274 return get_thread_lwp (thread)->stopped;
4275 }
4276
4277 /* This exposes stop-all-threads functionality to other modules. */
4278
4279 static void
4280 linux_pause_all (void)
4281 {
4282 stop_all_lwps ();
4283 }
4284
4285 static struct target_ops linux_target_ops = {
4286 linux_create_inferior,
4287 linux_attach,
4288 linux_kill,
4289 linux_detach,
4290 linux_mourn,
4291 linux_join,
4292 linux_thread_alive,
4293 linux_resume,
4294 linux_wait,
4295 linux_fetch_registers,
4296 linux_store_registers,
4297 linux_read_memory,
4298 linux_write_memory,
4299 linux_look_up_symbols,
4300 linux_request_interrupt,
4301 linux_read_auxv,
4302 linux_insert_point,
4303 linux_remove_point,
4304 linux_stopped_by_watchpoint,
4305 linux_stopped_data_address,
4306 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4307 linux_read_offsets,
4308 #else
4309 NULL,
4310 #endif
4311 #ifdef USE_THREAD_DB
4312 thread_db_get_tls_address,
4313 #else
4314 NULL,
4315 #endif
4316 linux_qxfer_spu,
4317 hostio_last_error_from_errno,
4318 linux_qxfer_osdata,
4319 linux_xfer_siginfo,
4320 linux_supports_non_stop,
4321 linux_async,
4322 linux_start_non_stop,
4323 linux_supports_multi_process,
4324 #ifdef USE_THREAD_DB
4325 thread_db_handle_monitor_command,
4326 #else
4327 NULL,
4328 #endif
4329 linux_core_of_thread,
4330 linux_process_qsupported,
4331 linux_supports_tracepoints,
4332 linux_read_pc,
4333 linux_write_pc,
4334 linux_thread_stopped,
4335 linux_pause_all
4336 };
4337
4338 static void
4339 linux_init_signals (void)
4340 {
4341 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
4342 to find what the cancel signal actually is. */
4343 #ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
4344 signal (__SIGRTMIN+1, SIG_IGN);
4345 #endif
4346 }
4347
4348 void
4349 initialize_low (void)
4350 {
4351 struct sigaction sigchld_action;
4352 memset (&sigchld_action, 0, sizeof (sigchld_action));
4353 set_target_ops (&linux_target_ops);
4354 set_breakpoint_data (the_low_target.breakpoint,
4355 the_low_target.breakpoint_len);
4356 linux_init_signals ();
4357 linux_test_for_tracefork ();
4358 #ifdef HAVE_LINUX_REGSETS
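  /* Count the regsets in target_regsets (the array is terminated by a
     sentinel entry with a negative size) and allocate one "disabled"
     flag per regset.  */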
4359 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
4360 ;
4361 disabled_regsets = xmalloc (num_regsets);
4362 #endif
4363
4364 sigchld_action.sa_handler = sigchld_handler;
4365 sigemptyset (&sigchld_action.sa_mask);
4366 sigchld_action.sa_flags = SA_RESTART;
4367 sigaction (SIGCHLD, &sigchld_action, NULL);
4368 }