/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
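
/* Illustrative sketch (editor's note, not part of the original
   sources): W_STOPCODE composes a "stopped" wait status that the
   standard WIFSTOPPED/WSTOPSIG macros can decode again; e.g. with
   the fallback definition above, W_STOPCODE (SIGTRAP) is 0x57f on
   hosts where SIGTRAP is 5.  */
#if 0
static void
w_stopcode_example (void)
{
  int wstat = W_STOPCODE (SIGTRAP);

  /* WIFSTOPPED looks for the 0x7f low byte; WSTOPSIG extracts the
     signal number from bits 8-15.  */
  gdb_assert (WIFSTOPPED (wstat));
  gdb_assert (WSTOPSIG (wstat) == SIGTRAP);
}
#endif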

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
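
/* Illustrative sketch (editor's note, not part of the original
   sources): these fixed-width auxv types let gdbserver parse an
   inferior's auxv with a known layout regardless of the host's own
   pointer size.  A minimal reader of /proc/PID/auxv for a 64-bit
   inferior might look like this; AT_NULL and AT_ENTRY come from
   <elf.h>.  */
#if 0
static uint64_t
auxv_entry_point_example (int pid)
{
  char filename[64];
  Elf64_auxv_t auxv;
  uint64_t entry = 0;
  FILE *f;

  sprintf (filename, "/proc/%d/auxv", pid);
  f = fopen (filename, "rb");
  if (f == NULL)
    return 0;

  /* The vector is a flat array of (a_type, a_un.a_val) pairs,
     terminated by an AT_NULL entry.  */
  while (fread (&auxv, sizeof (auxv), 1, f) == 1
	 && auxv.a_type != AT_NULL)
    if (auxv.a_type == AT_ENTRY)
      entry = auxv.a_un.a_val;

  fclose (f);
  return entry;
}
#endif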

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
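
/* Illustrative sketch (editor's note, not part of the original
   sources): the intended pairing of the two helpers above.  A stop
   that arrives for an LWP we don't know yet is parked with
   add_to_pid_list; when the clone event that explains it shows up,
   the status is claimed back with pull_pid_from_list.  */
#if 0
static void
pid_list_example (void)
{
  int status;

  /* Stash a stop we can't explain yet...  */
  add_to_pid_list (&stopped_pids, 1234, W_STOPCODE (SIGSTOP));

  /* ... and claim it later.  pull_pid_from_list returns 1 and fills
     STATUS on a hit, 0 if PID 1234 was never recorded.  */
  if (pull_pid_from_list (&stopped_pids, 1234, &status))
    gdb_assert (WSTOPSIG (status) == SIGSTOP);
}
#endif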

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)
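
/* Illustrative sketch (editor's note, not part of the original
   sources): this pipe implements the classic self-pipe pattern.  A
   SIGCHLD handler writes a byte into linux_event_pipe[1]; the event
   loop watches linux_event_pipe[0] and wakes up, turning an async
   signal into an ordinary readable-fd event.  Assuming the async
   setup code has made both ends non-blocking, the two halves are
   roughly:  */
#if 0
static void
event_pipe_example (void)
{
  char c = '+';

  /* Writer side, async-signal-safe, called from the SIGCHLD
     handler.  */
  if (write (linux_event_pipe[1], &c, 1) < 0)
    ;				/* Errors ignored.  */

  /* Reader side, called when the event loop sees the fd readable:
     drain the pipe, then collect the actual statuses with waitpid.  */
  while (read (linux_event_pipe[0], &c, 1) > 0)
    ;
}
#endif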

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if it is not an ELF file at all.  In either case,
   store the ELF machine in *MACHINE (EM_NONE for a non-ELF file).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return 1 if FILE is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file or too short to hold an ELF header, and -1 if the file is
   not accessible or is not an ELF file at all.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
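
/* Illustrative sketch (editor's note, not part of the original
   sources): typical use of the helper above, e.g. to notice that a
   32-bit gdbserver is about to deal with a 64-bit inferior.  */
#if 0
static void
elf_64_check_example (int pid)
{
  unsigned int machine;
  int res = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (res < 0)
    warning ("could not inspect /proc/%d/exe", pid);
  else if (res && sizeof (void *) == 4)
    warning ("64-bit inferior %d (ELF machine %u) under a 32-bit server",
	     pid, machine);
}
#endif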

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Set the arch when the first LWP stops.  */
  proc->private->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads != NOT_STOPPING_THREADS)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads != NOT_STOPPING_THREADS)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
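
/* Illustrative sketch (editor's note, not part of the original
   sources): where the extended-wait encoding decoded above comes
   from.  Once PTRACE_O_TRACECLONE is set on a tracee, a clone () in
   the inferior reports a SIGTRAP stop whose high status bits carry
   the event number, and the new thread's tid is fetched separately
   with PTRACE_GETEVENTMSG.  */
#if 0
static void
extended_wait_example (pid_t tracee)
{
  int wstat;

  ptrace (PTRACE_SETOPTIONS, tracee, (PTRACE_TYPE_ARG3) 0,
	  (PTRACE_TYPE_ARG4) PTRACE_O_TRACECLONE);
  ptrace (PTRACE_CONT, tracee, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

  if (my_waitpid (tracee, &wstat, __WALL) == tracee
      && WIFSTOPPED (wstat)
      && WSTOPSIG (wstat) == SIGTRAP
      && (wstat >> 16) == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;

      ptrace (PTRACE_GETEVENTMSG, tracee, (PTRACE_TYPE_ARG3) 0, &new_pid);
    }
}
#endif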

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    debug_printf ("stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
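
/* Worked example (editor's note, not part of the original sources):
   on i386 the breakpoint instruction (int3, 0xcc) is one byte, so
   decr_pc_after_break is 1.  If a breakpoint is planted at 0x1000
   and the inferior runs into it under PTRACE_CONT, the kernel
   reports $eip == 0x1001; get_stop_pc computes

     stop_pc = 0x1001 - decr_pc_after_break = 0x1000

   so the stop is attributed to the breakpoint address.  After
   PTRACE_SINGLESTEP, or when the stop was caused by a watchpoint or
   carries an extended event in the high status bits, no decrement is
   applied -- exactly the guards checked above.  */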

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

char *
linux_attach_fail_reason_string (ptid_t ptid, int err)
{
  static char *reason_string;
  struct buffer buffer;
  char *warnings;
  long lwpid = ptid_get_lwp (ptid);

  xfree (reason_string);

  buffer_init (&buffer);
  linux_ptrace_attach_fail_reason (lwpid, &buffer);
  buffer_grow_str0 (&buffer, "");
  warnings = buffer_finish (&buffer);
  if (warnings[0] != '\0')
    reason_string = xstrprintf ("%s (%d), %s",
				strerror (err), err, warnings);
  else
    reason_string = xstrprintf ("%s (%d)",
				strerror (err), err);
  xfree (warnings);
  return reason_string;
}

/* Attach to an inferior process.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
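
/* Illustrative sketch (editor's note, not part of the original
   sources): the bare attach protocol that the function above builds
   on.  PTRACE_ATTACH queues a SIGSTOP for the tracee; the tracer
   must collect that stop with waitpid before issuing further ptrace
   requests.  */
#if 0
static int
bare_attach_example (int lwpid)
{
  int wstat;

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) != 0)
    return errno;

  /* Wait for the attach-induced stop.  __WALL also covers clone
     children that don't raise SIGCHLD when they exit.  */
  if (my_waitpid (lwpid, &wstat, __WALL) != lwpid
      || !WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
    warning ("unexpected wait status 0x%x attaching to %d", wstat, lwpid);

  return 0;
}
#endif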

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
	{
	  fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
	  fflush (stderr);
	}
      else
	{
	  /* At this point we attached to the tgid.  Scan the task for
	     existing threads.  */
	  int new_threads_found;
	  int iterations = 0;

	  while (iterations < 2)
	    {
	      struct dirent *dp;

	      new_threads_found = 0;
	      /* Add all the other threads.  While we go through the
		 threads, new threads may be spawned.  Cycle through
		 the list of threads until we have done two iterations without
		 finding new threads.  */
	      while ((dp = readdir (dir)) != NULL)
		{
		  unsigned long lwp;
		  ptid_t ptid;

		  /* Fetch one lwp.  */
		  lwp = strtoul (dp->d_name, NULL, 10);

		  ptid = ptid_build (pid, lwp, 0);

		  /* Is this a new thread?  */
		  if (lwp != 0 && find_thread_ptid (ptid) == NULL)
		    {
		      int err;

		      if (debug_threads)
			debug_printf ("Found new lwp %ld\n", lwp);

		      err = linux_attach_lwp (ptid);
		      if (err != 0)
			warning ("Cannot attach to lwp %ld: %s",
				 lwp,
				 linux_attach_fail_reason_string (ptid, err));

		      new_threads_found++;
		    }
		}

	      if (!new_threads_found)
		iterations++;
	      else
		iterations = 0;

	      rewinddir (dir);
	    }
	  closedir (dir);
	}
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_inferior));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_inferior));
	      current_inferior = saved_inferior;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_inferior),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_inferior),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_inferior, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_inferior));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}
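
/* Illustrative sketch (editor's note, not part of the original
   sources): the deferred-signal queue above is a list threaded
   through PREV pointers with new entries pushed at the head, but
   dequeue_one_deferred_signal walks to the tail, so signals are
   replayed in arrival (FIFO) order.  Typical pairing:  */
#if 0
static void
deferred_signal_example (struct lwp_info *lwp)
{
  int wstat = W_STOPCODE (SIGUSR1);

  /* While the LWP is inside a jump pad, park the signal...  */
  enqueue_one_deferred_signal (lwp, &wstat);

  /* ... and once the LWP has moved out, replay the oldest one.  */
  if (dequeue_one_deferred_signal (lwp, &wstat))
    gdb_assert (WSTOPSIG (wstat) == SIGUSR1);
}
#endif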
1696
1697 /* Arrange for a breakpoint to be hit again later. We don't keep the
1698 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1699 will handle the current event, eventually we will resume this LWP,
1700 and this breakpoint will trap again. */
1701
1702 static int
1703 cancel_breakpoint (struct lwp_info *lwp)
1704 {
1705 struct thread_info *saved_inferior;
1706
1707 /* There's nothing to do if we don't support breakpoints. */
1708 if (!supports_breakpoints ())
1709 return 0;
1710
1711 /* breakpoint_at reads from current inferior. */
1712 saved_inferior = current_inferior;
1713 current_inferior = get_lwp_thread (lwp);
1714
1715 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1716 {
1717 if (debug_threads)
1718 debug_printf ("CB: Push back breakpoint for %s\n",
1719 target_pid_to_str (ptid_of (current_inferior)));
1720
1721 /* Back up the PC if necessary. */
1722 if (the_low_target.decr_pc_after_break)
1723 {
1724 struct regcache *regcache
1725 = get_thread_regcache (current_inferior, 1);
1726 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1727 }
1728
1729 current_inferior = saved_inferior;
1730 return 1;
1731 }
1732 else
1733 {
1734 if (debug_threads)
1735 debug_printf ("CB: No breakpoint found at %s for [%s]\n",
1736 paddress (lwp->stop_pc),
1737 target_pid_to_str (ptid_of (current_inferior)));
1738 }
1739
1740 current_inferior = saved_inferior;
1741 return 0;
1742 }
1743
1744 /* Do low-level handling of the event, and check if we should go on
1745 and pass it to caller code. Return the affected lwp if we are, or
1746 NULL otherwise. */
1747
1748 static struct lwp_info *
1749 linux_low_filter_event (ptid_t filter_ptid, int lwpid, int wstat)
1750 {
1751 struct lwp_info *child;
1752 struct thread_info *thread;
1753
1754 child = find_lwp_pid (pid_to_ptid (lwpid));
1755
1756 /* If we didn't find a process, one of two things presumably happened:
1757 - A process we started and then detached from has exited. Ignore it.
1758 - A process we are controlling has forked and the new child's stop
1759 was reported to us by the kernel. Save its PID. */
1760 if (child == NULL && WIFSTOPPED (wstat))
1761 {
1762 add_to_pid_list (&stopped_pids, lwpid, wstat);
1763 return NULL;
1764 }
1765 else if (child == NULL)
1766 return NULL;
1767
1768 thread = get_lwp_thread (child);
1769
1770 child->stopped = 1;
1771
1772 child->last_status = wstat;
1773
1774 if (WIFSTOPPED (wstat))
1775 {
1776 struct process_info *proc;
1777
1778 /* Architecture-specific setup after inferior is running. This
1779 needs to happen after we have attached to the inferior and it
1780 is stopped for the first time, but before we access any
1781 inferior registers. */
1782 proc = find_process_pid (pid_of (thread));
1783 if (proc->private->new_inferior)
1784 {
1785 struct thread_info *saved_inferior;
1786
1787 saved_inferior = current_inferior;
1788 current_inferior = thread;
1789
1790 the_low_target.arch_setup ();
1791
1792 current_inferior = saved_inferior;
1793
1794 proc->private->new_inferior = 0;
1795 }
1796 }
1797
1798 /* Store the STOP_PC, with adjustment applied. This depends on the
1799 architecture being defined already (so that CHILD has a valid
1800 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1801 not). */
1802 if (WIFSTOPPED (wstat))
1803 {
1804 if (debug_threads
1805 && the_low_target.get_pc != NULL)
1806 {
1807 struct thread_info *saved_inferior;
1808 struct regcache *regcache;
1809 CORE_ADDR pc;
1810
1811 saved_inferior = current_inferior;
1812 current_inferior = thread;
1813 regcache = get_thread_regcache (current_inferior, 1);
1814 pc = (*the_low_target.get_pc) (regcache);
1815 debug_printf ("linux_low_filter_event: pc is 0x%lx\n", (long) pc);
1816 current_inferior = saved_inferior;
1817 }
1818
1819 child->stop_pc = get_stop_pc (child);
1820 }
1821
1822 /* Fetch the possibly triggered data watchpoint info and store it in
1823 CHILD.
1824
1825 On some archs, like x86, that use debug registers to set
1826 watchpoints, it's possible that the way to know which watched
1827 address trapped, is to check the register that is used to select
1828 which address to watch. Problem is, between setting the
1829 watchpoint and reading back which data address trapped, the user
1830 may change the set of watchpoints, and, as a consequence, GDB
1831 changes the debug registers in the inferior. To avoid reading
1832 back a stale stopped-data-address when that happens, we cache in
1833 LP the fact that a watchpoint trapped, and the corresponding data
1834 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1835 changes the debug registers meanwhile, we have the cached data we
1836 can rely on. */
1837
1838 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
1839 {
1840 if (the_low_target.stopped_by_watchpoint == NULL)
1841 {
1842 child->stopped_by_watchpoint = 0;
1843 }
1844 else
1845 {
1846 struct thread_info *saved_inferior;
1847
1848 saved_inferior = current_inferior;
1849 current_inferior = thread;
1850
1851 child->stopped_by_watchpoint
1852 = the_low_target.stopped_by_watchpoint ();
1853
1854 if (child->stopped_by_watchpoint)
1855 {
1856 if (the_low_target.stopped_data_address != NULL)
1857 child->stopped_data_address
1858 = the_low_target.stopped_data_address ();
1859 else
1860 child->stopped_data_address = 0;
1861 }
1862
1863 current_inferior = saved_inferior;
1864 }
1865 }
1866
1867 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
1868 {
1869 linux_enable_event_reporting (lwpid);
1870 child->must_set_ptrace_flags = 0;
1871 }
1872
1873 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1874 && wstat >> 16 != 0)
1875 {
1876 handle_extended_wait (child, wstat);
1877 return NULL;
1878 }
1879
1880 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
1881 && child->stop_expected)
1882 {
1883 if (debug_threads)
1884 debug_printf ("Expected stop.\n");
1885 child->stop_expected = 0;
1886
1887 if (thread->last_resume_kind == resume_stop)
1888 {
1889 /* We want to report the stop to the core. Treat the
1890 SIGSTOP as a normal event. */
1891 }
1892 else if (stopping_threads != NOT_STOPPING_THREADS)
1893 {
1894 /* Stopping threads. We don't want this SIGSTOP to end up
1895 pending in the FILTER_PTID handling below. */
1896 return NULL;
1897 }
1898 else
1899 {
1900 /* Filter out the event. */
1901 linux_resume_one_lwp (child, child->stepping, 0, NULL);
1902 return NULL;
1903 }
1904 }
1905
1906 /* Check if the thread has exited. */
1907 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat))
1908 && num_lwps (pid_of (thread)) > 1)
1909 {
1910 if (debug_threads)
1911 debug_printf ("LLW: %d exited.\n", lwpid);
1912
1913 /* If there is at least one more LWP, then the exit signal
1914 was not the end of the debugged application and should be
1915 ignored. */
1916 delete_lwp (child);
1917 return NULL;
1918 }
1919
1920 if (!ptid_match (ptid_of (thread), filter_ptid))
1921 {
1922 if (debug_threads)
1923 debug_printf ("LWP %d got an event %06x, leaving pending.\n",
1924 lwpid, wstat);
1925
1926 if (WIFSTOPPED (wstat))
1927 {
1928 child->status_pending_p = 1;
1929 child->status_pending = wstat;
1930
1931 if (WSTOPSIG (wstat) != SIGSTOP)
1932 {
1933 /* Cancel breakpoint hits. The breakpoint may be
1934 removed before we fetch events from this process to
1935 report to the core. It is best not to assume the
1936 moribund breakpoints heuristic always handles these
1937 cases --- too many events could go through to
1938 the core before this one is handled. All-stop always
1939 cancels breakpoint hits in all threads. */
1940 if (non_stop
1941 && WSTOPSIG (wstat) == SIGTRAP
1942 && cancel_breakpoint (child))
1943 {
1944 /* Throw away the SIGTRAP. */
1945 child->status_pending_p = 0;
1946
1947 if (debug_threads)
1948 debug_printf ("LLW: LWP %d hit a breakpoint while"
1949 " waiting for another process;"
1950 " cancelled it\n", lwpid);
1951 }
1952 }
1953 }
1954 else if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
1955 {
1956 if (debug_threads)
1957 debug_printf ("LLWE: process %d exited while fetching "
1958 "event from another LWP\n", lwpid);
1959
1960 /* This was the last lwp in the process. Since events are
1961 serialized to the GDB core, we can't report this one right
1962 now. But the GDB core and the other target layers will want
1963 to be notified about the exit code/signal, so leave the
1964 status pending for the next time we're able to report
1965 it. */
1966 mark_lwp_dead (child, wstat);
1967 }
1968
1969 return NULL;
1970 }
1971
1972 return child;
1973 }
1974
1975 /* When the event-loop is doing a step-over, this points at the thread
1976 being stepped. */
1977 ptid_t step_over_bkpt;
1978
1979 /* Wait for an event from child(ren) WAIT_PTID, and return any that
1980 match FILTER_PTID (leaving others pending). The PTIDs can be:
1981 minus_one_ptid, to specify any child; a pid PTID, specifying all
1982 lwps of a thread group; or a PTID representing a single lwp. Store
1983 the stop status through the status pointer WSTAT. OPTIONS is
1984 passed to the waitpid call. Return 0 if no event was found and
1985 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
1986 were found. Return the PID of the stopped child otherwise. */
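/* Two example invocations (a sketch mirroring callers in this
   file): to wait on everything but only report events for PTID,
   pass the same value for both PTIDs, as linux_wait_for_event does
   below; to pull in all pending events while reporting none of
   them, pass null_ptid as the filter, as wait_for_sigstop does:

     linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
                                    &wstat, __WALL);
*/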
1987
1988 static int
1989 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
1990 int *wstatp, int options)
1991 {
1992 struct thread_info *event_thread;
1993 struct lwp_info *event_child, *requested_child;
1994 sigset_t block_mask, prev_mask;
1995
1996 retry:
1997 /* N.B. event_thread points to the thread_info struct that contains
1998 event_child. Keep them in sync. */
1999 event_thread = NULL;
2000 event_child = NULL;
2001 requested_child = NULL;
2002
2003 /* Check for a lwp with a pending status. */
2004
2005 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2006 {
2007 event_thread = (struct thread_info *)
2008 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2009 if (event_thread != NULL)
2010 event_child = get_thread_lwp (event_thread);
2011 if (debug_threads && event_thread)
2012 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2013 }
2014 else if (!ptid_equal (filter_ptid, null_ptid))
2015 {
2016 requested_child = find_lwp_pid (filter_ptid);
2017
2018 if (stopping_threads == NOT_STOPPING_THREADS
2019 && requested_child->status_pending_p
2020 && requested_child->collecting_fast_tracepoint)
2021 {
2022 enqueue_one_deferred_signal (requested_child,
2023 &requested_child->status_pending);
2024 requested_child->status_pending_p = 0;
2025 requested_child->status_pending = 0;
2026 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2027 }
2028
2029 if (requested_child->suspended
2030 && requested_child->status_pending_p)
2031 fatal ("requesting an event out of a suspended child?");
2032
2033 if (requested_child->status_pending_p)
2034 {
2035 event_child = requested_child;
2036 event_thread = get_lwp_thread (event_child);
2037 }
2038 }
2039
2040 if (event_child != NULL)
2041 {
2042 if (debug_threads)
2043 debug_printf ("Got an event from pending child %ld (%04x)\n",
2044 lwpid_of (event_thread), event_child->status_pending);
2045 *wstatp = event_child->status_pending;
2046 event_child->status_pending_p = 0;
2047 event_child->status_pending = 0;
2048 current_inferior = event_thread;
2049 return lwpid_of (event_thread);
2050 }
2051
2052 /* But if we don't find a pending event, we'll have to wait.
2053
2054 We only enter this loop if no process has a pending wait status.
2055 Thus any action taken in response to a wait status inside this
2056 loop is responding as soon as we detect the status, not after any
2057 pending events. */
2058
2059 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2060 all signals while here. */
2061 sigfillset (&block_mask);
2062 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2063
2064 while (event_child == NULL)
2065 {
2066 pid_t ret = 0;
2067
2068 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2069 quirks:
2070
2071 - If the thread group leader exits while other threads in the
2072 thread group still exist, waitpid(TGID, ...) hangs. That
2073 waitpid won't return an exit status until the other threads
2074 in the group are reaped.
2075
2076 - When a non-leader thread execs, that thread just vanishes
2077 without reporting an exit (so we'd hang if we waited for it
2078 explicitly in that case). The exec event is reported to
2079 the TGID pid (although we don't currently enable exec
2080 events). */
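/* E.g. (hypothetical pids): if leader 100 has exited but its clone
   101 still lives, waitpid (100, ...) without WNOHANG blocks until
   101 is reaped, whereas waitpid (-1, ..., WNOHANG) returns
   whatever is ready and never wedges on the zombie leader. */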
2081 errno = 0;
2082 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2083
2084 if (debug_threads)
2085 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2086 ret, errno ? strerror (errno) : "ERRNO-OK");
2087
2088 if (ret > 0)
2089 {
2090 if (debug_threads)
2091 {
2092 debug_printf ("LLW: waitpid %ld received %s\n",
2093 (long) ret, status_to_str (*wstatp));
2094 }
2095
2096 event_child = linux_low_filter_event (filter_ptid,
2097 ret, *wstatp);
2098 if (event_child != NULL)
2099 {
2100 /* We got an event to report to the core. */
2101 event_thread = get_lwp_thread (event_child);
2102 break;
2103 }
2104
2105 /* Retry until nothing comes out of waitpid. A single
2106 SIGCHLD can indicate more than one child stopped. */
2107 continue;
2108 }
2109
2110 /* Check for zombie thread group leaders. Those can't be reaped
2111 until all other threads in the thread group are. */
2112 check_zombie_leaders ();
2113
2114 /* If there are no resumed children left in the set of LWPs we
2115 want to wait for, bail. We can't just block in
2116 waitpid/sigsuspend, because lwps might have been left stopped
2117 in trace-stop state, and we'd be stuck forever waiting for
2118 their status to change (which would only happen if we resumed
2119 them). Even if WNOHANG is set, this return code is preferred
2120 over 0 (below), as it is more detailed. */
2121 if ((find_inferior (&all_threads,
2122 not_stopped_callback,
2123 &wait_ptid) == NULL))
2124 {
2125 if (debug_threads)
2126 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2127 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2128 return -1;
2129 }
2130
2131 /* No interesting event to report to the caller. */
2132 if ((options & WNOHANG))
2133 {
2134 if (debug_threads)
2135 debug_printf ("WNOHANG set, no event found\n");
2136
2137 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2138 return 0;
2139 }
2140
2141 /* Block until we get an event reported with SIGCHLD. */
2142 if (debug_threads)
2143 debug_printf ("sigsuspend'ing\n");
2144
2145 sigsuspend (&prev_mask);
2146 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2147 goto retry;
2148 }
2149
2150 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2151
2152 current_inferior = event_thread;
2153
2154 /* Check for thread exit. */
2155 if (! WIFSTOPPED (*wstatp))
2156 {
2157 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2158
2159 if (debug_threads)
2160 debug_printf ("LWP %ld is the last lwp of process. "
2161 "Process %d exiting.\n",
2162 lwpid_of (event_thread), pid_of (event_thread));
2163 return lwpid_of (event_thread);
2164 }
2165
2166 return lwpid_of (event_thread);
2167 }
2168
2169 /* Wait for an event from child(ren) PTID. PTIDs can be:
2170 minus_one_ptid, to specify any child; a pid PTID, specifying all
2171 lwps of a thread group; or a PTID representing a single lwp. Store
2172 the stop status through the status pointer WSTAT. OPTIONS is
2173 passed to the waitpid call. Return 0 if no event was found and
2174 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2175 were found. Return the PID of the stopped child otherwise. */
2176
2177 static int
2178 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2179 {
2180 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2181 }
2182
2183 /* Count the LWP's that have had events. */
2184
2185 static int
2186 count_events_callback (struct inferior_list_entry *entry, void *data)
2187 {
2188 struct thread_info *thread = (struct thread_info *) entry;
2189 struct lwp_info *lp = get_thread_lwp (thread);
2190 int *count = data;
2191
2192 gdb_assert (count != NULL);
2193
2194 /* Count only resumed LWPs that have a SIGTRAP event pending that
2195 should be reported to GDB. */
2196 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2197 && thread->last_resume_kind != resume_stop
2198 && lp->status_pending_p
2199 && WIFSTOPPED (lp->status_pending)
2200 && WSTOPSIG (lp->status_pending) == SIGTRAP
2201 && !breakpoint_inserted_here (lp->stop_pc))
2202 (*count)++;
2203
2204 return 0;
2205 }
2206
2207 /* Select the LWP (if any) that is currently being single-stepped. */
2208
2209 static int
2210 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2211 {
2212 struct thread_info *thread = (struct thread_info *) entry;
2213 struct lwp_info *lp = get_thread_lwp (thread);
2214
2215 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2216 && thread->last_resume_kind == resume_step
2217 && lp->status_pending_p)
2218 return 1;
2219 else
2220 return 0;
2221 }
2222
2223 /* Select the Nth LWP that has had a SIGTRAP event that should be
2224 reported to GDB. */
2225
2226 static int
2227 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2228 {
2229 struct thread_info *thread = (struct thread_info *) entry;
2230 struct lwp_info *lp = get_thread_lwp (thread);
2231 int *selector = data;
2232
2233 gdb_assert (selector != NULL);
2234
2235 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2236 if (thread->last_resume_kind != resume_stop
2237 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2238 && lp->status_pending_p
2239 && WIFSTOPPED (lp->status_pending)
2240 && WSTOPSIG (lp->status_pending) == SIGTRAP
2241 && !breakpoint_inserted_here (lp->stop_pc))
2242 if ((*selector)-- == 0)
2243 return 1;
2244
2245 return 0;
2246 }
2247
2248 static int
2249 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2250 {
2251 struct thread_info *thread = (struct thread_info *) entry;
2252 struct lwp_info *lp = get_thread_lwp (thread);
2253 struct lwp_info *event_lp = data;
2254
2255 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2256 if (lp == event_lp)
2257 return 0;
2258
2259 /* If a LWP other than the LWP that we're reporting an event for has
2260 hit a GDB breakpoint (as opposed to some random trap signal),
2261 then just arrange for it to hit it again later. We don't keep
2262 the SIGTRAP status and don't forward the SIGTRAP signal to the
2263 LWP. We will handle the current event, eventually we will resume
2264 all LWPs, and this one will get its breakpoint trap again.
2265
2266 If we do not do this, then we run the risk that the user will
2267 delete or disable the breakpoint, but the LWP will have already
2268 tripped on it. */
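/* An illustrative scenario (not from the sources): LWPs A and B
   both trap on GDB breakpoints; A is elected as the event LWP. If
   B's SIGTRAP were kept, the user could delete B's breakpoint and
   resume, only to get a trap report for a breakpoint that no
   longer exists. cancel_breakpoint instead winds B's PC back to
   the breakpoint address and discards the pending SIGTRAP, so B
   simply re-traps (or runs through) once resumed. */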
2269
2270 if (thread->last_resume_kind != resume_stop
2271 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2272 && lp->status_pending_p
2273 && WIFSTOPPED (lp->status_pending)
2274 && WSTOPSIG (lp->status_pending) == SIGTRAP
2275 && !lp->stepping
2276 && !lp->stopped_by_watchpoint
2277 && cancel_breakpoint (lp))
2278 /* Throw away the SIGTRAP. */
2279 lp->status_pending_p = 0;
2280
2281 return 0;
2282 }
2283
2284 static void
2285 linux_cancel_breakpoints (void)
2286 {
2287 find_inferior (&all_threads, cancel_breakpoints_callback, NULL);
2288 }
2289
2290 /* Select one LWP out of those that have events pending. */
2291
2292 static void
2293 select_event_lwp (struct lwp_info **orig_lp)
2294 {
2295 int num_events = 0;
2296 int random_selector;
2297 struct thread_info *event_thread;
2298
2299 /* Give preference to any LWP that is being single-stepped. */
2300 event_thread
2301 = (struct thread_info *) find_inferior (&all_threads,
2302 select_singlestep_lwp_callback,
2303 NULL);
2304 if (event_thread != NULL)
2305 {
2306 if (debug_threads)
2307 debug_printf ("SEL: Select single-step %s\n",
2308 target_pid_to_str (ptid_of (event_thread)));
2309 }
2310 else
2311 {
2312 /* No single-stepping LWP. Select one at random, out of those
2313 which have had SIGTRAP events. */
2314
2315 /* First see how many SIGTRAP events we have. */
2316 find_inferior (&all_threads, count_events_callback, &num_events);
2317
2318 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
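/* The expression below maps rand ()'s range [0, RAND_MAX] onto an
   integer uniformly distributed over [0, num_events): e.g. with
   num_events == 3 the selector comes out 0, 1 or 2 with (nearly)
   equal probability. select_event_lwp_callback then decrements the
   selector once per matching LWP and picks the one that reaches
   zero. */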
2319 random_selector = (int)
2320 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2321
2322 if (debug_threads && num_events > 1)
2323 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2324 num_events, random_selector);
2325
2326 event_thread
2327 = (struct thread_info *) find_inferior (&all_threads,
2328 select_event_lwp_callback,
2329 &random_selector);
2330 }
2331
2332 if (event_thread != NULL)
2333 {
2334 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2335
2336 /* Switch the event LWP. */
2337 *orig_lp = event_lp;
2338 }
2339 }
2340
2341 /* Decrement the suspend count of an LWP. */
2342
2343 static int
2344 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2345 {
2346 struct thread_info *thread = (struct thread_info *) entry;
2347 struct lwp_info *lwp = get_thread_lwp (thread);
2348
2349 /* Ignore EXCEPT. */
2350 if (lwp == except)
2351 return 0;
2352
2353 lwp->suspended--;
2354
2355 gdb_assert (lwp->suspended >= 0);
2356 return 0;
2357 }
2358
2359 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2360 non-NULL. */
2361
2362 static void
2363 unsuspend_all_lwps (struct lwp_info *except)
2364 {
2365 find_inferior (&all_threads, unsuspend_one_lwp, except);
2366 }
2367
2368 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2369 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2370 void *data);
2371 static int lwp_running (struct inferior_list_entry *entry, void *data);
2372 static ptid_t linux_wait_1 (ptid_t ptid,
2373 struct target_waitstatus *ourstatus,
2374 int target_options);
2375
2376 /* Stabilize threads (move out of jump pads).
2377
2378 If a thread is midway through collecting a fast tracepoint, we need to
2379 finish the collection and move it out of the jump pad before
2380 reporting the signal.
2381
2382 This avoids recursion while collecting (when a signal arrives
2383 midway, and the signal handler itself collects), which would trash
2384 the trace buffer. In case the user set a breakpoint in a signal
2385 handler, this avoids the backtrace showing the jump pad, etc.
2386 Most importantly, there are certain things we can't do safely if
2387 threads are stopped in a jump pad (or in its callees). For
2388 example:
2389
2390 - starting a new trace run. A thread still collecting from the
2391 previous run could trash the trace buffer when resumed. The trace
2392 buffer control structures would have been reset but the thread
2393 would have no way to tell. The thread could even be midway through
2394 memcpy'ing into the buffer, which would mean that when resumed, it
2395 would clobber the trace buffer that had been set up for the new run.
2396
2397 - we can't rewrite/reuse the jump pads for new tracepoints
2398 safely. Say you do tstart while a thread is stopped midway through
2399 collecting. When the thread is later resumed, it finishes the
2400 collection, and returns to the jump pad, to execute the original
2401 instruction that was under the tracepoint jump at the time the
2402 older run had been started. If the jump pad had since been
2403 rewritten for something else in the new run, the thread would now
2404 execute the wrong / random instructions. */
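/* In outline, the function below does:

     kick every LWP out of its jump pad
       (move_out_of_jump_pad_callback);
     while (some LWP is still running)
       run the full linux_wait_1 machinery, so internal
       breakpoints --- including each exit-jump-pad breakpoint ---
       are stepped over as usual, suspending each LWP as it
       reports a stop and deferring any interesting signal;
     unsuspend everything and restore the current thread. */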
2405
2406 static void
2407 linux_stabilize_threads (void)
2408 {
2409 struct thread_info *save_inferior;
2410 struct thread_info *thread_stuck;
2411
2412 thread_stuck
2413 = (struct thread_info *) find_inferior (&all_threads,
2414 stuck_in_jump_pad_callback,
2415 NULL);
2416 if (thread_stuck != NULL)
2417 {
2418 if (debug_threads)
2419 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2420 lwpid_of (thread_stuck));
2421 return;
2422 }
2423
2424 save_inferior = current_inferior;
2425
2426 stabilizing_threads = 1;
2427
2428 /* Kick 'em all. */
2429 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2430
2431 /* Loop until all are stopped out of the jump pads. */
2432 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2433 {
2434 struct target_waitstatus ourstatus;
2435 struct lwp_info *lwp;
2436 int wstat;
2437
2438 /* Note that we go through the full wait event loop. While
2439 moving threads out of the jump pad, we need to be able to step
2440 over internal breakpoints and such. */
2441 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2442
2443 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2444 {
2445 lwp = get_thread_lwp (current_inferior);
2446
2447 /* Lock it. */
2448 lwp->suspended++;
2449
2450 if (ourstatus.value.sig != GDB_SIGNAL_0
2451 || current_inferior->last_resume_kind == resume_stop)
2452 {
2453 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2454 enqueue_one_deferred_signal (lwp, &wstat);
2455 }
2456 }
2457 }
2458
2459 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2460
2461 stabilizing_threads = 0;
2462
2463 current_inferior = save_inferior;
2464
2465 if (debug_threads)
2466 {
2467 thread_stuck
2468 = (struct thread_info *) find_inferior (&all_threads,
2469 stuck_in_jump_pad_callback,
2470 NULL);
2471 if (thread_stuck != NULL)
2472 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2473 lwpid_of (thread_stuck));
2474 }
2475 }
2476
2477 /* Wait for process, returns status. */
2478
2479 static ptid_t
2480 linux_wait_1 (ptid_t ptid,
2481 struct target_waitstatus *ourstatus, int target_options)
2482 {
2483 int w;
2484 struct lwp_info *event_child;
2485 int options;
2486 int pid;
2487 int step_over_finished;
2488 int bp_explains_trap;
2489 int maybe_internal_trap;
2490 int report_to_gdb;
2491 int trace_event;
2492 int in_step_range;
2493
2494 if (debug_threads)
2495 {
2496 debug_enter ();
2497 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2498 }
2499
2500 /* Translate generic target options into linux options. */
2501 options = __WALL;
2502 if (target_options & TARGET_WNOHANG)
2503 options |= WNOHANG;
2504
2505 retry:
2506 bp_explains_trap = 0;
2507 trace_event = 0;
2508 in_step_range = 0;
2509 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2510
2511 /* If we were only supposed to resume one thread, only wait for
2512 that thread - if it's still alive. If it died, however - which
2513 can happen if we're coming from the thread death case below -
2514 then we need to make sure we restart the other threads. We could
2515 pick a thread at random or restart all; restarting all is less
2516 arbitrary. */
2517 if (!non_stop
2518 && !ptid_equal (cont_thread, null_ptid)
2519 && !ptid_equal (cont_thread, minus_one_ptid))
2520 {
2521 struct thread_info *thread;
2522
2523 thread = (struct thread_info *) find_inferior_id (&all_threads,
2524 cont_thread);
2525
2526 /* No stepping, no signal - unless one is pending already, of course. */
2527 if (thread == NULL)
2528 {
2529 struct thread_resume resume_info;
2530 resume_info.thread = minus_one_ptid;
2531 resume_info.kind = resume_continue;
2532 resume_info.sig = 0;
2533 linux_resume (&resume_info, 1);
2534 }
2535 else
2536 ptid = cont_thread;
2537 }
2538
2539 if (ptid_equal (step_over_bkpt, null_ptid))
2540 pid = linux_wait_for_event (ptid, &w, options);
2541 else
2542 {
2543 if (debug_threads)
2544 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2545 target_pid_to_str (step_over_bkpt));
2546 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2547 }
2548
2549 if (pid == 0)
2550 {
2551 gdb_assert (target_options & TARGET_WNOHANG);
2552
2553 if (debug_threads)
2554 {
2555 debug_printf ("linux_wait_1 ret = null_ptid, "
2556 "TARGET_WAITKIND_IGNORE\n");
2557 debug_exit ();
2558 }
2559
2560 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2561 return null_ptid;
2562 }
2563 else if (pid == -1)
2564 {
2565 if (debug_threads)
2566 {
2567 debug_printf ("linux_wait_1 ret = null_ptid, "
2568 "TARGET_WAITKIND_NO_RESUMED\n");
2569 debug_exit ();
2570 }
2571
2572 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2573 return null_ptid;
2574 }
2575
2576 event_child = get_thread_lwp (current_inferior);
2577
2578 /* linux_wait_for_event only returns an exit status for the last
2579 child of a process. Report it. */
2580 if (WIFEXITED (w) || WIFSIGNALED (w))
2581 {
2582 if (WIFEXITED (w))
2583 {
2584 ourstatus->kind = TARGET_WAITKIND_EXITED;
2585 ourstatus->value.integer = WEXITSTATUS (w);
2586
2587 if (debug_threads)
2588 {
2589 debug_printf ("linux_wait_1 ret = %s, exited with "
2590 "retcode %d\n",
2591 target_pid_to_str (ptid_of (current_inferior)),
2592 WEXITSTATUS (w));
2593 debug_exit ();
2594 }
2595 }
2596 else
2597 {
2598 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2599 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2600
2601 if (debug_threads)
2602 {
2603 debug_printf ("linux_wait_1 ret = %s, terminated with "
2604 "signal %d\n",
2605 target_pid_to_str (ptid_of (current_inferior)),
2606 WTERMSIG (w));
2607 debug_exit ();
2608 }
2609 }
2610
2611 return ptid_of (current_inferior);
2612 }
2613
2614 /* If this event was not handled before, and is not a SIGTRAP, we
2615 report it. SIGILL and SIGSEGV are also treated as traps in case
2616 a breakpoint is inserted at the current PC. If this target does
2617 not support internal breakpoints at all, we also report the
2618 SIGTRAP without further processing; it's of no concern to us. */
2619 maybe_internal_trap
2620 = (supports_breakpoints ()
2621 && (WSTOPSIG (w) == SIGTRAP
2622 || ((WSTOPSIG (w) == SIGILL
2623 || WSTOPSIG (w) == SIGSEGV)
2624 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2625
2626 if (maybe_internal_trap)
2627 {
2628 /* Handle anything that requires bookkeeping before deciding to
2629 report the event or continue waiting. */
2630
2631 /* First check if we can explain the SIGTRAP with an internal
2632 breakpoint, or if we should possibly report the event to GDB.
2633 Do this before anything that may remove or insert a
2634 breakpoint. */
2635 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2636
2637 /* We have a SIGTRAP, possibly a step-over dance has just
2638 finished. If so, tweak the state machine accordingly,
2639 reinsert breakpoints and delete any reinsert (software
2640 single-step) breakpoints. */
2641 step_over_finished = finish_step_over (event_child);
2642
2643 /* Now invoke the callbacks of any internal breakpoints there. */
2644 check_breakpoints (event_child->stop_pc);
2645
2646 /* Handle tracepoint data collecting. This may overflow the
2647 trace buffer, and cause a tracing stop, removing
2648 breakpoints. */
2649 trace_event = handle_tracepoints (event_child);
2650
2651 if (bp_explains_trap)
2652 {
2653 /* If we stepped or ran into an internal breakpoint, we've
2654 already handled it. So next time we resume (from this
2655 PC), we should step over it. */
2656 if (debug_threads)
2657 debug_printf ("Hit a gdbserver breakpoint.\n");
2658
2659 if (breakpoint_here (event_child->stop_pc))
2660 event_child->need_step_over = 1;
2661 }
2662 }
2663 else
2664 {
2665 /* We have some other signal, possibly a step-over dance was in
2666 progress, and it should be cancelled too. */
2667 step_over_finished = finish_step_over (event_child);
2668 }
2669
2670 /* We have all the data we need. Either report the event to GDB, or
2671 resume threads and keep waiting for more. */
2672
2673 /* If we're collecting a fast tracepoint, finish the collection and
2674 move out of the jump pad before delivering a signal. See
2675 linux_stabilize_threads. */
2676
2677 if (WIFSTOPPED (w)
2678 && WSTOPSIG (w) != SIGTRAP
2679 && supports_fast_tracepoints ()
2680 && agent_loaded_p ())
2681 {
2682 if (debug_threads)
2683 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2684 "to defer or adjust it.\n",
2685 WSTOPSIG (w), lwpid_of (current_inferior));
2686
2687 /* Allow debugging the jump pad itself. */
2688 if (current_inferior->last_resume_kind != resume_step
2689 && maybe_move_out_of_jump_pad (event_child, &w))
2690 {
2691 enqueue_one_deferred_signal (event_child, &w);
2692
2693 if (debug_threads)
2694 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2695 WSTOPSIG (w), lwpid_of (current_inferior));
2696
2697 linux_resume_one_lwp (event_child, 0, 0, NULL);
2698 goto retry;
2699 }
2700 }
2701
2702 if (event_child->collecting_fast_tracepoint)
2703 {
2704 if (debug_threads)
2705 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2706 "Check if we're already there.\n",
2707 lwpid_of (current_inferior),
2708 event_child->collecting_fast_tracepoint);
2709
2710 trace_event = 1;
2711
2712 event_child->collecting_fast_tracepoint
2713 = linux_fast_tracepoint_collecting (event_child, NULL);
2714
2715 if (event_child->collecting_fast_tracepoint != 1)
2716 {
2717 /* No longer need this breakpoint. */
2718 if (event_child->exit_jump_pad_bkpt != NULL)
2719 {
2720 if (debug_threads)
2721 debug_printf ("No longer need exit-jump-pad bkpt; removing it;"
2722 " stopping all threads momentarily.\n");
2723
2724 /* Other running threads could hit this breakpoint.
2725 We don't handle moribund locations like GDB does;
2726 instead we always pause all threads when removing
2727 breakpoints, so that any step-over or
2728 decr_pc_after_break adjustment is always taken
2729 care of while the breakpoint is still
2730 inserted. */
2731 stop_all_lwps (1, event_child);
2732 cancel_breakpoints ();
2733
2734 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2735 event_child->exit_jump_pad_bkpt = NULL;
2736
2737 unstop_all_lwps (1, event_child);
2738
2739 gdb_assert (event_child->suspended >= 0);
2740 }
2741 }
2742
2743 if (event_child->collecting_fast_tracepoint == 0)
2744 {
2745 if (debug_threads)
2746 debug_printf ("fast tracepoint finished "
2747 "collecting successfully.\n");
2748
2749 /* We may have a deferred signal to report. */
2750 if (dequeue_one_deferred_signal (event_child, &w))
2751 {
2752 if (debug_threads)
2753 debug_printf ("dequeued one signal.\n");
2754 }
2755 else
2756 {
2757 if (debug_threads)
2758 debug_printf ("no deferred signals.\n");
2759
2760 if (stabilizing_threads)
2761 {
2762 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2763 ourstatus->value.sig = GDB_SIGNAL_0;
2764
2765 if (debug_threads)
2766 {
2767 debug_printf ("linux_wait_1 ret = %s, stopped "
2768 "while stabilizing threads\n",
2769 target_pid_to_str (ptid_of (current_inferior)));
2770 debug_exit ();
2771 }
2772
2773 return ptid_of (current_inferior);
2774 }
2775 }
2776 }
2777 }
2778
2779 /* Check whether GDB would be interested in this event. */
2780
2781 /* If GDB is not interested in this signal, don't stop other
2782 threads, and don't report it to GDB. Just resume the inferior
2783 right away. We do this for threading-related signals as well as
2784 any that GDB specifically requested we ignore. But never ignore
2785 SIGSTOP if we sent it ourselves, and do not ignore signals when
2786 stepping - they may require special handling to skip the signal
2787 handler. */
2788 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2789 thread library? */
2790 if (WIFSTOPPED (w)
2791 && current_inferior->last_resume_kind != resume_step
2792 && (
2793 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2794 (current_process ()->private->thread_db != NULL
2795 && (WSTOPSIG (w) == __SIGRTMIN
2796 || WSTOPSIG (w) == __SIGRTMIN + 1))
2797 ||
2798 #endif
2799 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2800 && !(WSTOPSIG (w) == SIGSTOP
2801 && current_inferior->last_resume_kind == resume_stop))))
2802 {
2803 siginfo_t info, *info_p;
2804
2805 if (debug_threads)
2806 debug_printf ("Ignored signal %d for LWP %ld.\n",
2807 WSTOPSIG (w), lwpid_of (current_inferior));
2808
2809 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_inferior),
2810 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2811 info_p = &info;
2812 else
2813 info_p = NULL;
2814 linux_resume_one_lwp (event_child, event_child->stepping,
2815 WSTOPSIG (w), info_p);
2816 goto retry;
2817 }
2818
2819 /* Note that all addresses are always "out of the step range" when
2820 there's no range to begin with. */
2821 in_step_range = lwp_in_step_range (event_child);
2822
2823 /* If GDB wanted this thread to single step, and the thread is out
2824 of the step range, we always want to report the SIGTRAP, and let
2825 GDB handle it. Watchpoints should always be reported. So should
2826 signals we can't explain. A SIGTRAP we can't explain could be a
2827 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
2828 do, we'd be able to handle GDB breakpoints on top of internal
2829 breakpoints, by handling the internal breakpoint and still
2830 reporting the event to GDB. If we don't, we're out of luck, GDB
2831 won't see the breakpoint hit. */
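/* Roughly: report the event if our own bookkeeping can't explain
   the trap (no internal breakpoint, no finished step-over, no
   tracepoint event), or if GDB asked for it explicitly: a
   single-step that left the step range, a watchpoint hit, or an
   unconditional GDB breakpoint with no commands attached. */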
2832 report_to_gdb = (!maybe_internal_trap
2833 || (current_inferior->last_resume_kind == resume_step
2834 && !in_step_range)
2835 || event_child->stopped_by_watchpoint
2836 || (!step_over_finished && !in_step_range
2837 && !bp_explains_trap && !trace_event)
2838 || (gdb_breakpoint_here (event_child->stop_pc)
2839 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2840 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2841
2842 run_breakpoint_commands (event_child->stop_pc);
2843
2844 /* We found no reason GDB would want us to stop. We either hit one
2845 of our own breakpoints, or finished an internal step GDB
2846 shouldn't know about. */
2847 if (!report_to_gdb)
2848 {
2849 if (debug_threads)
2850 {
2851 if (bp_explains_trap)
2852 debug_printf ("Hit a gdbserver breakpoint.\n");
2853 if (step_over_finished)
2854 debug_printf ("Step-over finished.\n");
2855 if (trace_event)
2856 debug_printf ("Tracepoint event.\n");
2857 if (lwp_in_step_range (event_child))
2858 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2859 paddress (event_child->stop_pc),
2860 paddress (event_child->step_range_start),
2861 paddress (event_child->step_range_end));
2862 }
2863
2864 /* We're not reporting this breakpoint to GDB, so apply the
2865 decr_pc_after_break adjustment to the inferior's regcache
2866 ourselves. */
2867
2868 if (the_low_target.set_pc != NULL)
2869 {
2870 struct regcache *regcache
2871 = get_thread_regcache (current_inferior, 1);
2872 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2873 }
2874
2875 /* We may have finished stepping over a breakpoint. If so,
2876 we've stopped and suspended all LWPs momentarily except the
2877 stepping one. This is where we resume them all again. We're
2878 going to keep waiting, so use proceed, which handles stepping
2879 over the next breakpoint. */
2880 if (debug_threads)
2881 debug_printf ("proceeding all threads.\n");
2882
2883 if (step_over_finished)
2884 unsuspend_all_lwps (event_child);
2885
2886 proceed_all_lwps ();
2887 goto retry;
2888 }
2889
2890 if (debug_threads)
2891 {
2892 if (current_inferior->last_resume_kind == resume_step)
2893 {
2894 if (event_child->step_range_start == event_child->step_range_end)
2895 debug_printf ("GDB wanted to single-step, reporting event.\n");
2896 else if (!lwp_in_step_range (event_child))
2897 debug_printf ("Out of step range, reporting event.\n");
2898 }
2899 if (event_child->stopped_by_watchpoint)
2900 debug_printf ("Stopped by watchpoint.\n");
2901 if (gdb_breakpoint_here (event_child->stop_pc))
2902 debug_printf ("Stopped by GDB breakpoint.\n");
2903 debug_printf ("Hit a non-gdbserver trap event.\n");
2905 }
2906
2907 /* Alright, we're going to report a stop. */
2908
2909 if (!non_stop && !stabilizing_threads)
2910 {
2911 /* In all-stop, stop all threads. */
2912 stop_all_lwps (0, NULL);
2913
2914 /* If we're not waiting for a specific LWP, choose an event LWP
2915 from among those that have had events. Giving equal priority
2916 to all LWPs that have had events helps prevent
2917 starvation. */
2918 if (ptid_equal (ptid, minus_one_ptid))
2919 {
2920 event_child->status_pending_p = 1;
2921 event_child->status_pending = w;
2922
2923 select_event_lwp (&event_child);
2924
2925 /* current_inferior and event_child must stay in sync. */
2926 current_inferior = get_lwp_thread (event_child);
2927
2928 event_child->status_pending_p = 0;
2929 w = event_child->status_pending;
2930 }
2931
2932 /* Now that we've selected our final event LWP, cancel any
2933 breakpoints in other LWPs that have hit a GDB breakpoint.
2934 See the comment in cancel_breakpoints_callback to find out
2935 why. */
2936 find_inferior (&all_threads, cancel_breakpoints_callback, event_child);
2937
2938 /* If we were doing a step-over, all other threads but the stepping one
2939 had been paused in start_step_over, with their suspend counts
2940 incremented. We don't want to do a full unstop/unpause, because we're
2941 in all-stop mode (so we want threads stopped), but we still need to
2942 unsuspend the other threads, to decrement their `suspended' count
2943 back. */
2944 if (step_over_finished)
2945 unsuspend_all_lwps (event_child);
2946
2947 /* Stabilize threads (move out of jump pads). */
2948 stabilize_threads ();
2949 }
2950 else
2951 {
2952 /* If we just finished a step-over, then all threads had been
2953 momentarily paused. In all-stop, that's fine, we want
2954 threads stopped by now anyway. In non-stop, we need to
2955 re-resume threads that GDB wanted to be running. */
2956 if (step_over_finished)
2957 unstop_all_lwps (1, event_child);
2958 }
2959
2960 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2961
2962 if (current_inferior->last_resume_kind == resume_stop
2963 && WSTOPSIG (w) == SIGSTOP)
2964 {
2965 /* A thread that has been requested to stop by GDB with vCont;t
2966 stopped cleanly, so report it as SIG0. The use of
2967 SIGSTOP is an implementation detail. */
2968 ourstatus->value.sig = GDB_SIGNAL_0;
2969 }
2970 else if (current_inferior->last_resume_kind == resume_stop
2971 && WSTOPSIG (w) != SIGSTOP)
2972 {
2973 /* A thread that has been requested to stop by GDB with vCont;t,
2974 but it stopped for other reasons. */
2975 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2976 }
2977 else
2978 {
2979 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2980 }
2981
2982 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2983
2984 if (debug_threads)
2985 {
2986 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
2987 target_pid_to_str (ptid_of (current_inferior)),
2988 ourstatus->kind, ourstatus->value.sig);
2989 debug_exit ();
2990 }
2991
2992 return ptid_of (current_inferior);
2993 }
2994
2995 /* Get rid of any pending event in the pipe. */
2996 static void
2997 async_file_flush (void)
2998 {
2999 int ret;
3000 char buf;
3001
3002 do
3003 ret = read (linux_event_pipe[0], &buf, 1);
3004 while (ret >= 0 || (ret == -1 && errno == EINTR));
3005 }
3006
3007 /* Put something in the pipe, so the event loop wakes up. */
3008 static void
3009 async_file_mark (void)
3010 {
3011 int ret;
3012
3013 async_file_flush ();
3014
3015 do
3016 ret = write (linux_event_pipe[1], "+", 1);
3017 while (ret == 0 || (ret == -1 && errno == EINTR));
3018
3019 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3020 be awakened anyway. */
3021 }
3022
3023 static ptid_t
3024 linux_wait (ptid_t ptid,
3025 struct target_waitstatus *ourstatus, int target_options)
3026 {
3027 ptid_t event_ptid;
3028
3029 /* Flush the async file first. */
3030 if (target_is_async_p ())
3031 async_file_flush ();
3032
3033 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3034
3035 /* If at least one stop was reported, there may be more. A single
3036 SIGCHLD can signal more than one child stop. */
3037 if (target_is_async_p ()
3038 && (target_options & TARGET_WNOHANG) != 0
3039 && !ptid_equal (event_ptid, null_ptid))
3040 async_file_mark ();
3041
3042 return event_ptid;
3043 }
3044
3045 /* Send a signal to an LWP. */
3046
3047 static int
3048 kill_lwp (unsigned long lwpid, int signo)
3049 {
3050 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3051 fails, then we are not using nptl threads and we should be using kill. */
3052
3053 #ifdef __NR_tkill
3054 {
3055 static int tkill_failed;
3056
3057 if (!tkill_failed)
3058 {
3059 int ret;
3060
3061 errno = 0;
3062 ret = syscall (__NR_tkill, lwpid, signo);
3063 if (errno != ENOSYS)
3064 return ret;
3065 tkill_failed = 1;
3066 }
3067 }
3068 #endif
3069
3070 return kill (lwpid, signo);
3071 }
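/* Background, summarized (not from the original sources): under
   NPTL all threads share one process id, and kill (pid, sig) is
   addressed to the whole thread group --- the kernel may deliver
   it to any eligible thread. tkill (tid, sig) targets one specific
   kernel thread, which is what stopping an individual LWP
   requires. The ENOSYS fallback above caters to kernels that
   predate the tkill syscall. */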
3072
3073 void
3074 linux_stop_lwp (struct lwp_info *lwp)
3075 {
3076 send_sigstop (lwp);
3077 }
3078
3079 static void
3080 send_sigstop (struct lwp_info *lwp)
3081 {
3082 int pid;
3083
3084 pid = lwpid_of (get_lwp_thread (lwp));
3085
3086 /* If we already have a pending stop signal for this LWP, don't
3087 send another. */
3088 if (lwp->stop_expected)
3089 {
3090 if (debug_threads)
3091 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3092
3093 return;
3094 }
3095
3096 if (debug_threads)
3097 debug_printf ("Sending sigstop to lwp %d\n", pid);
3098
3099 lwp->stop_expected = 1;
3100 kill_lwp (pid, SIGSTOP);
3101 }
3102
3103 static int
3104 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3105 {
3106 struct thread_info *thread = (struct thread_info *) entry;
3107 struct lwp_info *lwp = get_thread_lwp (thread);
3108
3109 /* Ignore EXCEPT. */
3110 if (lwp == except)
3111 return 0;
3112
3113 if (lwp->stopped)
3114 return 0;
3115
3116 send_sigstop (lwp);
3117 return 0;
3118 }
3119
3120 /* Increment the suspend count of an LWP, and stop it, if not stopped
3121 yet. */
3122 static int
3123 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3124 void *except)
3125 {
3126 struct thread_info *thread = (struct thread_info *) entry;
3127 struct lwp_info *lwp = get_thread_lwp (thread);
3128
3129 /* Ignore EXCEPT. */
3130 if (lwp == except)
3131 return 0;
3132
3133 lwp->suspended++;
3134
3135 return send_sigstop_callback (entry, except);
3136 }
3137
3138 static void
3139 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3140 {
3141 /* It's dead, really. */
3142 lwp->dead = 1;
3143
3144 /* Store the exit status for later. */
3145 lwp->status_pending_p = 1;
3146 lwp->status_pending = wstat;
3147
3148 /* Prevent trying to stop it. */
3149 lwp->stopped = 1;
3150
3151 /* No further stops are expected from a dead lwp. */
3152 lwp->stop_expected = 0;
3153 }
3154
3155 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3156
3157 static void
3158 wait_for_sigstop (void)
3159 {
3160 struct thread_info *saved_inferior;
3161 ptid_t saved_tid;
3162 int wstat;
3163 int ret;
3164
3165 saved_inferior = current_inferior;
3166 if (saved_inferior != NULL)
3167 saved_tid = saved_inferior->entry.id;
3168 else
3169 saved_tid = null_ptid; /* avoid bogus unused warning */
3170
3171 if (debug_threads)
3172 debug_printf ("wait_for_sigstop: pulling events\n");
3173
3174 /* Passing NULL_PTID as filter indicates we want all events to be
3175 left pending. Eventually this returns when there are no
3176 unwaited-for children left. */
3177 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3178 &wstat, __WALL);
3179 gdb_assert (ret == -1);
3180
3181 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
3182 current_inferior = saved_inferior;
3183 else
3184 {
3185 if (debug_threads)
3186 debug_printf ("Previously current thread died.\n");
3187
3188 if (non_stop)
3189 {
3190 /* We can't change the current inferior behind GDB's back,
3191 otherwise, a subsequent command may apply to the wrong
3192 process. */
3193 current_inferior = NULL;
3194 }
3195 else
3196 {
3197 /* Set a valid thread as current. */
3198 set_desired_inferior (0);
3199 }
3200 }
3201 }
3202
3203 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3204 move it out, because we need to report the stop event to GDB. For
3205 example, if the user puts a breakpoint in the jump pad, it's
3206 because she wants to debug it. */
3207
3208 static int
3209 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3210 {
3211 struct thread_info *thread = (struct thread_info *) entry;
3212 struct lwp_info *lwp = get_thread_lwp (thread);
3213
3214 gdb_assert (lwp->suspended == 0);
3215 gdb_assert (lwp->stopped);
3216
3217 /* Allow debugging the jump pad, gdb_collect, etc.. */
3218 return (supports_fast_tracepoints ()
3219 && agent_loaded_p ()
3220 && (gdb_breakpoint_here (lwp->stop_pc)
3221 || lwp->stopped_by_watchpoint
3222 || thread->last_resume_kind == resume_step)
3223 && linux_fast_tracepoint_collecting (lwp, NULL));
3224 }
3225
3226 static void
3227 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3228 {
3229 struct thread_info *thread = (struct thread_info *) entry;
3230 struct lwp_info *lwp = get_thread_lwp (thread);
3231 int *wstat;
3232
3233 gdb_assert (lwp->suspended == 0);
3234 gdb_assert (lwp->stopped);
3235
3236 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3237
3238 /* Allow debugging the jump pad, gdb_collect, etc. */
3239 if (!gdb_breakpoint_here (lwp->stop_pc)
3240 && !lwp->stopped_by_watchpoint
3241 && thread->last_resume_kind != resume_step
3242 && maybe_move_out_of_jump_pad (lwp, wstat))
3243 {
3244 if (debug_threads)
3245 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3246 lwpid_of (thread));
3247
3248 if (wstat)
3249 {
3250 lwp->status_pending_p = 0;
3251 enqueue_one_deferred_signal (lwp, wstat);
3252
3253 if (debug_threads)
3254 debug_printf ("Signal %d for LWP %ld deferred "
3255 "(in jump pad)\n",
3256 WSTOPSIG (*wstat), lwpid_of (thread));
3257 }
3258
3259 linux_resume_one_lwp (lwp, 0, 0, NULL);
3260 }
3261 else
3262 lwp->suspended++;
3263 }
3264
3265 static int
3266 lwp_running (struct inferior_list_entry *entry, void *data)
3267 {
3268 struct thread_info *thread = (struct thread_info *) entry;
3269 struct lwp_info *lwp = get_thread_lwp (thread);
3270
3271 if (lwp->dead)
3272 return 0;
3273 if (lwp->stopped)
3274 return 0;
3275 return 1;
3276 }
3277
3278 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3279 If SUSPEND, then also increase the suspend count of every LWP,
3280 except EXCEPT. */
3281
3282 static void
3283 stop_all_lwps (int suspend, struct lwp_info *except)
3284 {
3285 /* Should not be called recursively. */
3286 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3287
3288 if (debug_threads)
3289 {
3290 debug_enter ();
3291 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3292 suspend ? "stop-and-suspend" : "stop",
3293 except != NULL
3294 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3295 : "none");
3296 }
3297
3298 stopping_threads = (suspend
3299 ? STOPPING_AND_SUSPENDING_THREADS
3300 : STOPPING_THREADS);
3301
3302 if (suspend)
3303 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3304 else
3305 find_inferior (&all_threads, send_sigstop_callback, except);
3306 wait_for_sigstop ();
3307 stopping_threads = NOT_STOPPING_THREADS;
3308
3309 if (debug_threads)
3310 {
3311 debug_printf ("stop_all_lwps done, setting stopping_threads "
3312 "back to !stopping\n");
3313 debug_exit ();
3314 }
3315 }
3316
3317 /* Resume execution of the inferior process.
3318 If STEP is nonzero, single-step it.
3319 If SIGNAL is nonzero, give it that signal. */
3320
3321 static void
3322 linux_resume_one_lwp (struct lwp_info *lwp,
3323 int step, int signal, siginfo_t *info)
3324 {
3325 struct thread_info *thread = get_lwp_thread (lwp);
3326 struct thread_info *saved_inferior;
3327 int fast_tp_collecting;
3328
3329 if (lwp->stopped == 0)
3330 return;
3331
3332 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3333
3334 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3335
3336 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3337 user used the "jump" command, or "set $pc = foo"). */
3338 if (lwp->stop_pc != get_pc (lwp))
3339 {
3340 /* Collecting 'while-stepping' actions doesn't make sense
3341 anymore. */
3342 release_while_stepping_state_list (thread);
3343 }
3344
3345 /* If we have pending signals or status, and a new signal, enqueue the
3346 signal. Also enqueue the signal if we are waiting to reinsert a
3347 breakpoint; it will be picked up again below. */
3348 if (signal != 0
3349 && (lwp->status_pending_p
3350 || lwp->pending_signals != NULL
3351 || lwp->bp_reinsert != 0
3352 || fast_tp_collecting))
3353 {
3354 struct pending_signals *p_sig;
3355 p_sig = xmalloc (sizeof (*p_sig));
3356 p_sig->prev = lwp->pending_signals;
3357 p_sig->signal = signal;
3358 if (info == NULL)
3359 memset (&p_sig->info, 0, sizeof (siginfo_t));
3360 else
3361 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3362 lwp->pending_signals = p_sig;
3363 }
3364
3365 if (lwp->status_pending_p)
3366 {
3367 if (debug_threads)
3368 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3369 " has pending status\n",
3370 lwpid_of (thread), step ? "step" : "continue", signal,
3371 lwp->stop_expected ? "expected" : "not expected");
3372 return;
3373 }
3374
3375 saved_inferior = current_inferior;
3376 current_inferior = thread;
3377
3378 if (debug_threads)
3379 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3380 lwpid_of (thread), step ? "step" : "continue", signal,
3381 lwp->stop_expected ? "expected" : "not expected");
3382
3383 /* This bit needs some thinking about. If we get a signal that
3384 we must report while a single-step reinsert is still pending,
3385 we often end up resuming the thread. It might be better to
3386 (ew) allow a stack of pending events; then we could be sure that
3387 the reinsert happened right away and not lose any signals.
3388
3389 Making this stack would also shrink the window in which breakpoints are
3390 uninserted (see comment in linux_wait_for_lwp) but not enough for
3391 complete correctness, so it won't solve that problem. It may be
3392 worthwhile just to solve this one, however. */
3393 if (lwp->bp_reinsert != 0)
3394 {
3395 if (debug_threads)
3396 debug_printf (" pending reinsert at 0x%s\n",
3397 paddress (lwp->bp_reinsert));
3398
3399 if (can_hardware_single_step ())
3400 {
3401 if (fast_tp_collecting == 0)
3402 {
3403 if (step == 0)
3404 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3405 if (lwp->suspended)
3406 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3407 lwp->suspended);
3408 }
3409
3410 step = 1;
3411 }
3412
3413 /* Postpone any pending signal. It was enqueued above. */
3414 signal = 0;
3415 }
3416
3417 if (fast_tp_collecting == 1)
3418 {
3419 if (debug_threads)
3420 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3421 " (exit-jump-pad-bkpt)\n",
3422 lwpid_of (thread));
3423
3424 /* Postpone any pending signal. It was enqueued above. */
3425 signal = 0;
3426 }
3427 else if (fast_tp_collecting == 2)
3428 {
3429 if (debug_threads)
3430 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3431 " single-stepping\n",
3432 lwpid_of (thread));
3433
3434 if (can_hardware_single_step ())
3435 step = 1;
3436 else
3437 fatal ("moving out of jump pad single-stepping"
3438 " not implemented on this target");
3439
3440 /* Postpone any pending signal. It was enqueued above. */
3441 signal = 0;
3442 }
3443
3444 /* If we have while-stepping actions in this thread, set it stepping.
3445 If we have a signal to deliver, it may or may not be set to
3446 SIG_IGN, we don't know. Assume so, and allow collecting
3447 while-stepping into a signal handler. A possible smart thing to
3448 do would be to set an internal breakpoint at the signal return
3449 address, continue, and carry on catching this while-stepping
3450 action only when that breakpoint is hit. A future
3451 enhancement. */
3452 if (thread->while_stepping != NULL
3453 && can_hardware_single_step ())
3454 {
3455 if (debug_threads)
3456 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3457 lwpid_of (thread));
3458 step = 1;
3459 }
3460
3461 if (debug_threads && the_low_target.get_pc != NULL)
3462 {
3463 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3464 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3465 debug_printf (" resuming from pc 0x%lx\n", (long) pc);
3466 }
3467
3468 /* If we have pending signals, consume one unless we are trying to
3469 reinsert a breakpoint or we're trying to finish a fast tracepoint
3470 collect. */
3471 if (lwp->pending_signals != NULL
3472 && lwp->bp_reinsert == 0
3473 && fast_tp_collecting == 0)
3474 {
3475 struct pending_signals **p_sig;
3476
3477 p_sig = &lwp->pending_signals;
3478 while ((*p_sig)->prev != NULL)
3479 p_sig = &(*p_sig)->prev;
3480
3481 signal = (*p_sig)->signal;
3482 if ((*p_sig)->info.si_signo != 0)
3483 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3484 &(*p_sig)->info);
3485
3486 free (*p_sig);
3487 *p_sig = NULL;
3488 }
3489
3490 if (the_low_target.prepare_to_resume != NULL)
3491 the_low_target.prepare_to_resume (lwp);
3492
3493 regcache_invalidate_thread (thread);
3494 errno = 0;
3495 lwp->stopped = 0;
3496 lwp->stopped_by_watchpoint = 0;
3497 lwp->stepping = step;
3498 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3499 (PTRACE_TYPE_ARG3) 0,
3500 /* Coerce to a uintptr_t first to avoid potential gcc warning
3501 of coercing an 8 byte integer to a 4 byte pointer. */
3502 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
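/* The double cast above matters on, e.g., 64-bit hosts where
   PTRACE_TYPE_ARG4 is a pointer type: converting the 4-byte int
   directly can trigger gcc's "cast to pointer from integer of
   different size" warning, while widening through uintptr_t first
   keeps the conversion value-preserving and warning-free. */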
3503
3504 current_inferior = saved_inferior;
3505 if (errno)
3506 {
3507 /* ESRCH from ptrace either means that the thread was already
3508 running (an error) or that it is gone (a race condition). If
3509 it's gone, we will get a notification the next time we wait,
3510 so we can ignore the error. We could differentiate these
3511 two, but it's tricky without waiting; the thread still exists
3512 as a zombie, so sending it signal 0 would succeed. So just
3513 ignore ESRCH. */
3514 if (errno == ESRCH)
3515 return;
3516
3517 perror_with_name ("ptrace");
3518 }
3519 }
3520
3521 struct thread_resume_array
3522 {
3523 struct thread_resume *resume;
3524 size_t n;
3525 };
3526
3527 /* This function is called once per thread via find_inferior.
3528 ARG is a pointer to a thread_resume_array struct.
3529 We look up the thread specified by ENTRY in ARG, and mark the thread
3530 with a pointer to the appropriate resume request.
3531
3532 This algorithm is O(threads * resume elements), but the number of
3533 resume elements is small (and will remain small at least until GDB
3534 supports thread suspension). */
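/* For example (hypothetical values): given the resume array

     { { ptid of process 1234,  resume_continue, ... },
       { minus_one_ptid,        resume_stop, ... } }

   every thread of process 1234 takes the first, more specific
   entry --- the loop below scans in order and the first match
   wins --- while all other threads fall through to the
   minus_one_ptid wildcard. */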
3535
3536 static int
3537 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3538 {
3539 struct thread_info *thread = (struct thread_info *) entry;
3540 struct lwp_info *lwp = get_thread_lwp (thread);
3541 int ndx;
3542 struct thread_resume_array *r;
3543
3544 r = arg;
3545
3546 for (ndx = 0; ndx < r->n; ndx++)
3547 {
3548 ptid_t ptid = r->resume[ndx].thread;
3549 if (ptid_equal (ptid, minus_one_ptid)
3550 || ptid_equal (ptid, entry->id)
3551 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3552 of PID'. */
3553 || (ptid_get_pid (ptid) == pid_of (thread)
3554 && (ptid_is_pid (ptid)
3555 || ptid_get_lwp (ptid) == -1)))
3556 {
3557 if (r->resume[ndx].kind == resume_stop
3558 && thread->last_resume_kind == resume_stop)
3559 {
3560 if (debug_threads)
3561 debug_printf ("already %s LWP %ld at GDB's request\n",
3562 (thread->last_status.kind
3563 == TARGET_WAITKIND_STOPPED)
3564 ? "stopped"
3565 : "stopping",
3566 lwpid_of (thread));
3567
3568 continue;
3569 }
3570
3571 lwp->resume = &r->resume[ndx];
3572 thread->last_resume_kind = lwp->resume->kind;
3573
3574 lwp->step_range_start = lwp->resume->step_range_start;
3575 lwp->step_range_end = lwp->resume->step_range_end;
3576
3577 /* If we had a deferred signal to report, dequeue one now.
3578 This can happen if LWP gets more than one signal while
3579 trying to get out of a jump pad. */
3580 if (lwp->stopped
3581 && !lwp->status_pending_p
3582 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3583 {
3584 lwp->status_pending_p = 1;
3585
3586 if (debug_threads)
3587 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3588 "leaving status pending.\n",
3589 WSTOPSIG (lwp->status_pending),
3590 lwpid_of (thread));
3591 }
3592
3593 return 0;
3594 }
3595 }
3596
3597 /* No resume action for this thread. */
3598 lwp->resume = NULL;
3599
3600 return 0;
3601 }
3602
3603 /* find_inferior callback for linux_resume.
3604 Set *FLAG_P if this lwp has an interesting status pending. */
3605
3606 static int
3607 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3608 {
3609 struct thread_info *thread = (struct thread_info *) entry;
3610 struct lwp_info *lwp = get_thread_lwp (thread);
3611
3612 /* LWPs which will not be resumed are not interesting, because
3613 we might not wait for them next time through linux_wait. */
3614 if (lwp->resume == NULL)
3615 return 0;
3616
3617 if (lwp->status_pending_p)
3618 * (int *) flag_p = 1;
3619
3620 return 0;
3621 }
3622
3623 /* Return 1 if this lwp that GDB wants running is stopped at an
3624 internal breakpoint that we need to step over. It assumes that any
3625 required STOP_PC adjustment has already been propagated to the
3626 inferior's regcache. */
3627
3628 static int
3629 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3630 {
3631 struct thread_info *thread = (struct thread_info *) entry;
3632 struct lwp_info *lwp = get_thread_lwp (thread);
3633 struct thread_info *saved_inferior;
3634 CORE_ADDR pc;
3635
3636 /* LWPs which will not be resumed are not interesting, because we
3637 might not wait for them next time through linux_wait. */
3638
3639 if (!lwp->stopped)
3640 {
3641 if (debug_threads)
3642 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3643 lwpid_of (thread));
3644 return 0;
3645 }
3646
3647 if (thread->last_resume_kind == resume_stop)
3648 {
3649 if (debug_threads)
3650 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3651 " stopped\n",
3652 lwpid_of (thread));
3653 return 0;
3654 }
3655
3656 gdb_assert (lwp->suspended >= 0);
3657
3658 if (lwp->suspended)
3659 {
3660 if (debug_threads)
3661 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3662 lwpid_of (thread));
3663 return 0;
3664 }
3665
3666 if (!lwp->need_step_over)
3667 {
3668 if (debug_threads)
3669 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
3670 return 0;
3670 }
3671
3672 if (lwp->status_pending_p)
3673 {
3674 if (debug_threads)
3675 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3676 " status.\n",
3677 lwpid_of (thread));
3678 return 0;
3679 }
3680
3681 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3682 or we have. */
3683 pc = get_pc (lwp);
3684
3685 /* If the PC has changed since we stopped, then don't do anything,
3686 and let the breakpoint/tracepoint be hit. This happens if, for
3687 instance, GDB handled the decr_pc_after_break subtraction itself,
3688 GDB is OOL stepping this thread, or the user has issued a "jump"
3689 command, or poked the thread's registers herself. */
3690 if (pc != lwp->stop_pc)
3691 {
3692 if (debug_threads)
3693 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3694 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3695 lwpid_of (thread),
3696 paddress (lwp->stop_pc), paddress (pc));
3697
3698 lwp->need_step_over = 0;
3699 return 0;
3700 }
3701
3702 saved_inferior = current_inferior;
3703 current_inferior = thread;
3704
3705 /* We can only step over breakpoints we know about. */
3706 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3707 {
3708 /* Don't step over a breakpoint that GDB expects to hit,
3709 though. If the condition is being evaluated on the target's side
3710 and it evaluates to false, step over this breakpoint as well. */
3711 if (gdb_breakpoint_here (pc)
3712 && gdb_condition_true_at_breakpoint (pc)
3713 && gdb_no_commands_at_breakpoint (pc))
3714 {
3715 if (debug_threads)
3716 debug_printf ("Need step over [LWP %ld]? yes, but found"
3717 " GDB breakpoint at 0x%s; skipping step over\n",
3718 lwpid_of (thread), paddress (pc));
3719
3720 current_inferior = saved_inferior;
3721 return 0;
3722 }
3723 else
3724 {
3725 if (debug_threads)
3726 debug_printf ("Need step over [LWP %ld]? yes, "
3727 "found breakpoint at 0x%s\n",
3728 lwpid_of (thread), paddress (pc));
3729
3730 /* We've found an lwp that needs stepping over --- return 1 so
3731 that find_inferior stops looking. */
3732 current_inferior = saved_inferior;
3733
3734 /* If the step over is cancelled, this is set again. */
3735 lwp->need_step_over = 0;
3736 return 1;
3737 }
3738 }
3739
3740 current_inferior = saved_inferior;
3741
3742 if (debug_threads)
3743 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3744 " at 0x%s\n",
3745 lwpid_of (thread), paddress (pc));
3746
3747 return 0;
3748 }
3749
3750 /* Start a step-over operation on LWP. When LWP stops at a
3751 breakpoint, to make progress we need to get the breakpoint out
3752 of the way. If we let other threads run while we do that, they may
3753 pass by the breakpoint location and miss hitting it. To avoid
3754 that, a step-over momentarily stops all threads while LWP is
3755 single-stepped with the breakpoint temporarily uninserted from
3756 the inferior. When the single-step finishes, we reinsert the
3757 breakpoint and let all threads that are supposed to be running
3758 run again.
3759
3760 On targets that don't support hardware single-step, we don't
3761 currently support full software single-stepping. Instead, we only
3762 support stepping over the thread event breakpoint, by asking the
3763 low target where to place a reinsert breakpoint. Since this
3764 routine assumes the breakpoint being stepped over is a thread
3765 event breakpoint, the return address of the current function is
3766 usually a good enough place to set the reinsert breakpoint. */
3767
3768 static int
3769 start_step_over (struct lwp_info *lwp)
3770 {
3771 struct thread_info *thread = get_lwp_thread (lwp);
3772 struct thread_info *saved_inferior;
3773 CORE_ADDR pc;
3774 int step;
3775
3776 if (debug_threads)
3777 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3778 lwpid_of (thread));
3779
3780 stop_all_lwps (1, lwp);
3781 gdb_assert (lwp->suspended == 0);
3782
3783 if (debug_threads)
3784 debug_printf ("Done stopping all threads for step-over.\n");
3785
3786 /* Note, we should always reach here with an already adjusted PC,
3787 either by GDB (if we're resuming due to GDB's request), or by our
3788 caller, if we just finished handling an internal breakpoint GDB
3789 shouldn't care about. */
3790 pc = get_pc (lwp);
3791
3792 saved_inferior = current_inferior;
3793 current_inferior = thread;
3794
3795 lwp->bp_reinsert = pc;
3796 uninsert_breakpoints_at (pc);
3797 uninsert_fast_tracepoint_jumps_at (pc);
3798
3799 if (can_hardware_single_step ())
3800 {
3801 step = 1;
3802 }
3803 else
3804 {
3805 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3806 set_reinsert_breakpoint (raddr);
3807 step = 0;
3808 }
3809
3810 current_inferior = saved_inferior;
3811
3812 linux_resume_one_lwp (lwp, step, 0, NULL);
3813
3814 /* Require next event from this LWP. */
3815 step_over_bkpt = thread->entry.id;
3816 return 1;
3817 }
3818
3819 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3820 start_step_over, if still there, and delete any reinsert
3821 breakpoints we've set, on non hardware single-step targets. */
3822
3823 static int
3824 finish_step_over (struct lwp_info *lwp)
3825 {
3826 if (lwp->bp_reinsert != 0)
3827 {
3828 if (debug_threads)
3829 debug_printf ("Finished step over.\n");
3830
3831 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3832 may be no breakpoint to reinsert there by now. */
3833 reinsert_breakpoints_at (lwp->bp_reinsert);
3834 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3835
3836 lwp->bp_reinsert = 0;
3837
3838 /* Delete any software-single-step reinsert breakpoints. No
3839 longer needed. We don't have to worry about other threads
3840 hitting this trap, and later not being able to explain it,
3841 because we were stepping over a breakpoint, and we hold all
3842 threads but LWP stopped while doing that. */
3843 if (!can_hardware_single_step ())
3844 delete_reinsert_breakpoints ();
3845
3846 step_over_bkpt = null_ptid;
3847 return 1;
3848 }
3849 else
3850 return 0;
3851 }
3852
3853 /* This function is called once per thread. We check the thread's resume
3854 request, which will tell us whether to resume, step, or leave the thread
3855 stopped; and what signal, if any, it should be sent.
3856
3857 For threads for which we aren't explicitly told otherwise, we
3858 preserve the stepping flag; this is used for stepping over
3859 gdbserver-placed breakpoints.
3860
3861 If pending_flags was set in any thread, we queue any needed
3862 signals, since we won't actually resume. We already have a pending
3863 event to report, so we don't need to preserve any step requests;
3864 they should be re-issued if necessary. */
3865
3866 static int
3867 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3868 {
3869 struct thread_info *thread = (struct thread_info *) entry;
3870 struct lwp_info *lwp = get_thread_lwp (thread);
3871 int step;
3872 int leave_all_stopped = * (int *) arg;
3873 int leave_pending;
3874
3875 if (lwp->resume == NULL)
3876 return 0;
3877
3878 if (lwp->resume->kind == resume_stop)
3879 {
3880 if (debug_threads)
3881 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
3882
3883 if (!lwp->stopped)
3884 {
3885 if (debug_threads)
3886 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
3887
3888 /* Stop the thread, and wait for the event asynchronously,
3889 through the event loop. */
3890 send_sigstop (lwp);
3891 }
3892 else
3893 {
3894 if (debug_threads)
3895 debug_printf ("already stopped LWP %ld\n",
3896 lwpid_of (thread));
3897
3898 /* The LWP may have been stopped in an internal event that
3899 was not meant to be notified back to GDB (e.g., gdbserver
3900 breakpoint), so we should be reporting a stop event in
3901 this case too. */
3902
3903 /* If the thread already has a pending SIGSTOP, this is a
3904 no-op. Otherwise, something later will presumably resume
3905 the thread and this will cause it to cancel any pending
3906 operation, due to last_resume_kind == resume_stop. If
3907 the thread already has a pending status to report, we
3908 will still report it the next time we wait - see
3909 status_pending_p_callback. */
3910
3911 /* If we already have a pending signal to report, then
3912 there's no need to queue a SIGSTOP, as this means we're
3913 midway through moving the LWP out of the jumppad, and we
3914 will report the pending signal as soon as that is
3915 finished. */
3916 if (lwp->pending_signals_to_report == NULL)
3917 send_sigstop (lwp);
3918 }
3919
3920 /* For stop requests, we're done. */
3921 lwp->resume = NULL;
3922 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3923 return 0;
3924 }
3925
3926 /* If this thread which is about to be resumed has a pending status,
3927 then don't resume any threads - we can just report the pending
3928 status. Make sure to queue any signals that would otherwise be
3929 sent. In all-stop mode, we make this decision based on whether *any*
3930 thread has a pending status. If there's a thread that needs the
3931 step-over-breakpoint dance, then don't resume any other thread
3932 but that particular one. */
3933 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3934
3935 if (!leave_pending)
3936 {
3937 if (debug_threads)
3938 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
3939
3940 step = (lwp->resume->kind == resume_step);
3941 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3942 }
3943 else
3944 {
3945 if (debug_threads)
3946 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
3947
3948 /* If we have a new signal, enqueue the signal. */
3949 if (lwp->resume->sig != 0)
3950 {
3951 struct pending_signals *p_sig;
3952 p_sig = xmalloc (sizeof (*p_sig));
3953 p_sig->prev = lwp->pending_signals;
3954 p_sig->signal = lwp->resume->sig;
3955 memset (&p_sig->info, 0, sizeof (siginfo_t));
3956
3957 /* If this is the same signal we were previously stopped by,
3958 make sure to queue its siginfo. We can ignore the return
3959 value of ptrace; if it fails, we'll skip
3960 PTRACE_SETSIGINFO. */
3961 if (WIFSTOPPED (lwp->last_status)
3962 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3963 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3964 &p_sig->info);
3965
3966 lwp->pending_signals = p_sig;
3967 }
3968 }
3969
3970 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3971 lwp->resume = NULL;
3972 return 0;
3973 }
3974
3975 static void
3976 linux_resume (struct thread_resume *resume_info, size_t n)
3977 {
3978 struct thread_resume_array array = { resume_info, n };
3979 struct thread_info *need_step_over = NULL;
3980 int any_pending;
3981 int leave_all_stopped;
3982
3983 if (debug_threads)
3984 {
3985 debug_enter ();
3986 debug_printf ("linux_resume:\n");
3987 }
3988
3989 find_inferior (&all_threads, linux_set_resume_request, &array);
3990
3991 /* If there is a thread which would otherwise be resumed, which has
3992 a pending status, then don't resume any threads - we can just
3993 report the pending status. Make sure to queue any signals that
3994 would otherwise be sent. In non-stop mode, we'll apply this
3995 logic to each thread individually. We consume all pending events
3996 before considering starting a step-over (in all-stop). */
3997 any_pending = 0;
3998 if (!non_stop)
3999 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4000
4001 /* If there is a thread which would otherwise be resumed, which is
4002 stopped at a breakpoint that needs stepping over, then don't
4003 resume any threads - have it step over the breakpoint with all
4004 other threads stopped, then resume all threads again. Make sure
4005 to queue any signals that would otherwise be delivered or
4006 queued. */
4007 if (!any_pending && supports_breakpoints ())
4008 need_step_over
4009 = (struct thread_info *) find_inferior (&all_threads,
4010 need_step_over_p, NULL);
4011
4012 leave_all_stopped = (need_step_over != NULL || any_pending);
4013
4014 if (debug_threads)
4015 {
4016 if (need_step_over != NULL)
4017 debug_printf ("Not resuming all, need step over\n");
4018 else if (any_pending)
4019 debug_printf ("Not resuming, all-stop and found "
4020 "an LWP with pending status\n");
4021 else
4022 debug_printf ("Resuming, no pending status or step over needed\n");
4023 }
4024
4025 /* Even if we're leaving threads stopped, queue all signals we'd
4026 otherwise deliver. */
4027 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4028
4029 if (need_step_over)
4030 start_step_over (get_thread_lwp (need_step_over));
4031
4032 if (debug_threads)
4033 {
4034 debug_printf ("linux_resume done\n");
4035 debug_exit ();
4036 }
4037 }
4038
4039 /* This function is called once per thread. We check the thread's
4040 last resume request, which will tell us whether to resume, step, or
4041 leave the thread stopped. Any signal the client requested to be
4042 delivered has already been enqueued at this point.
4043
4044 If any thread that GDB wants running is stopped at an internal
4045 breakpoint that needs stepping over, we start a step-over operation
4046 on that particular thread, and leave all others stopped. */
4047
4048 static int
4049 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4050 {
4051 struct thread_info *thread = (struct thread_info *) entry;
4052 struct lwp_info *lwp = get_thread_lwp (thread);
4053 int step;
4054
4055 if (lwp == except)
4056 return 0;
4057
4058 if (debug_threads)
4059 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4060
4061 if (!lwp->stopped)
4062 {
4063 if (debug_threads)
4064 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4065 return 0;
4066 }
4067
4068 if (thread->last_resume_kind == resume_stop
4069 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4070 {
4071 if (debug_threads)
4072 debug_printf (" client wants LWP %ld to remain stopped\n",
4073 lwpid_of (thread));
4074 return 0;
4075 }
4076
4077 if (lwp->status_pending_p)
4078 {
4079 if (debug_threads)
4080 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4081 lwpid_of (thread));
4082 return 0;
4083 }
4084
4085 gdb_assert (lwp->suspended >= 0);
4086
4087 if (lwp->suspended)
4088 {
4089 if (debug_threads)
4090 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4091 return 0;
4092 }
4093
4094 if (thread->last_resume_kind == resume_stop
4095 && lwp->pending_signals_to_report == NULL
4096 && lwp->collecting_fast_tracepoint == 0)
4097 {
4098 /* We haven't reported this LWP as stopped yet (otherwise, the
4099 last_status.kind check above would catch it, and we wouldn't
4100 reach here). This LWP may have been momentarily paused by a
4101 stop_all_lwps call while handling, for example, another LWP's
4102 step-over. In that case, the pending expected SIGSTOP signal
4103 that was queued at vCont;t handling time will have already
4104 been consumed by wait_for_sigstop, and so we need to requeue
4105 another one here. Note that if the LWP already has a SIGSTOP
4106 pending, this is a no-op. */
4107
4108 if (debug_threads)
4109 debug_printf ("Client wants LWP %ld to stop. "
4110 "Making sure it has a SIGSTOP pending\n",
4111 lwpid_of (thread));
4112
4113 send_sigstop (lwp);
4114 }
4115
4116 step = thread->last_resume_kind == resume_step;
4117 linux_resume_one_lwp (lwp, step, 0, NULL);
4118 return 0;
4119 }
4120
4121 static int
4122 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4123 {
4124 struct thread_info *thread = (struct thread_info *) entry;
4125 struct lwp_info *lwp = get_thread_lwp (thread);
4126
4127 if (lwp == except)
4128 return 0;
4129
4130 lwp->suspended--;
4131 gdb_assert (lwp->suspended >= 0);
4132
4133 return proceed_one_lwp (entry, except);
4134 }
4135
4136 /* When we finish a step-over, set threads running again. If there's
4137 another thread that may need a step-over, now's the time to start
4138 it. Eventually, we'll move all threads past their breakpoints. */
4139
4140 static void
4141 proceed_all_lwps (void)
4142 {
4143 struct thread_info *need_step_over;
4144
4145 /* If there is a thread which would otherwise be resumed, which is
4146 stopped at a breakpoint that needs stepping over, then don't
4147 resume any threads - have it step over the breakpoint with all
4148 other threads stopped, then resume all threads again. */
4149
4150 if (supports_breakpoints ())
4151 {
4152 need_step_over
4153 = (struct thread_info *) find_inferior (&all_threads,
4154 need_step_over_p, NULL);
4155
4156 if (need_step_over != NULL)
4157 {
4158 if (debug_threads)
4159 debug_printf ("proceed_all_lwps: found "
4160 "thread %ld needing a step-over\n",
4161 lwpid_of (need_step_over));
4162
4163 start_step_over (get_thread_lwp (need_step_over));
4164 return;
4165 }
4166 }
4167
4168 if (debug_threads)
4169 debug_printf ("Proceeding, no step-over needed\n");
4170
4171 find_inferior (&all_threads, proceed_one_lwp, NULL);
4172 }
4173
4174 /* Stopped LWPs that the client wanted to be running and that don't
4175 have pending statuses are set to run again, except for EXCEPT (if
4176 not NULL). This undoes a stop_all_lwps call. */
4177
4178 static void
4179 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4180 {
4181 if (debug_threads)
4182 {
4183 debug_enter ();
4184 if (except)
4185 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4186 lwpid_of (get_lwp_thread (except)));
4187 else
4188 debug_printf ("unstopping all lwps\n");
4189 }
4190
4191 if (unsuspend)
4192 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4193 else
4194 find_inferior (&all_threads, proceed_one_lwp, except);
4195
4196 if (debug_threads)
4197 {
4198 debug_printf ("unstop_all_lwps done\n");
4199 debug_exit ();
4200 }
4201 }
4202
4203
4204 #ifdef HAVE_LINUX_REGSETS
4205
4206 #define use_linux_regsets 1
4207
4208 /* Returns true if REGSET has been disabled. */
4209
4210 static int
4211 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4212 {
4213 return (info->disabled_regsets != NULL
4214 && info->disabled_regsets[regset - info->regsets]);
4215 }
4216
4217 /* Disable REGSET. */
4218
4219 static void
4220 disable_regset (struct regsets_info *info, struct regset_info *regset)
4221 {
4222 int dr_offset;
4223
4224 dr_offset = regset - info->regsets;
4225 if (info->disabled_regsets == NULL)
4226 info->disabled_regsets = xcalloc (1, info->num_regsets);
4227 info->disabled_regsets[dr_offset] = 1;
4228 }
4229
4230 static int
4231 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4232 struct regcache *regcache)
4233 {
4234 struct regset_info *regset;
4235 int saw_general_regs = 0;
4236 int pid;
4237 struct iovec iov;
4238
4239 regset = regsets_info->regsets;
4240
4241 pid = lwpid_of (current_inferior);
4242 while (regset->size >= 0)
4243 {
4244 void *buf, *data;
4245 int nt_type, res;
4246
4247 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4248 {
4249 regset ++;
4250 continue;
4251 }
4252
4253 buf = xmalloc (regset->size);
4254
4255 nt_type = regset->nt_type;
4256 if (nt_type)
4257 {
4258 iov.iov_base = buf;
4259 iov.iov_len = regset->size;
4260 data = (void *) &iov;
4261 }
4262 else
4263 data = buf;
4264
4265 #ifndef __sparc__
4266 res = ptrace (regset->get_request, pid,
4267 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4268 #else
4269 res = ptrace (regset->get_request, pid, data, nt_type);
4270 #endif
4271 if (res < 0)
4272 {
4273 if (errno == EIO)
4274 {
4275 /* If we get EIO on a regset, do not try it again for
4276 this process mode. */
4277 disable_regset (regsets_info, regset);
4278 free (buf);
4279 continue;
4280 }
4281 else
4282 {
4283 char s[256];
4284 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4285 pid);
4286 perror (s);
4287 }
4288 }
4289 else
4290 {
4291 if (regset->type == GENERAL_REGS)
saw_general_regs = 1;
/* Only store into the regcache if the fetch succeeded; on
error, BUF holds uninitialized data. */
regset->store_function (regcache, buf);
}
4292 regset ++;
4293 free (buf);
4294 }
4295 if (saw_general_regs)
4296 return 0;
4297 else
4298 return 1;
4299 }
4300
4301 static int
4302 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4303 struct regcache *regcache)
4304 {
4305 struct regset_info *regset;
4306 int saw_general_regs = 0;
4307 int pid;
4308 struct iovec iov;
4309
4310 regset = regsets_info->regsets;
4311
4312 pid = lwpid_of (current_inferior);
4313 while (regset->size >= 0)
4314 {
4315 void *buf, *data;
4316 int nt_type, res;
4317
4318 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4319 {
4320 regset ++;
4321 continue;
4322 }
4323
4324 buf = xmalloc (regset->size);
4325
4326 /* First fill the buffer with the current register set contents,
4327 in case there are any items in the kernel's regset that are
4328 not in gdbserver's regcache. */
4329
4330 nt_type = regset->nt_type;
4331 if (nt_type)
4332 {
4333 iov.iov_base = buf;
4334 iov.iov_len = regset->size;
4335 data = (void *) &iov;
4336 }
4337 else
4338 data = buf;
4339
4340 #ifndef __sparc__
4341 res = ptrace (regset->get_request, pid,
4342 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4343 #else
4344 res = ptrace (regset->get_request, pid, data, nt_type);
4345 #endif
4346
4347 if (res == 0)
4348 {
4349 /* Then overlay our cached registers on that. */
4350 regset->fill_function (regcache, buf);
4351
4352 /* Only now do we write the register set. */
4353 #ifndef __sparc__
4354 res = ptrace (regset->set_request, pid,
4355 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4356 #else
4357 res = ptrace (regset->set_request, pid, data, nt_type);
4358 #endif
4359 }
4360
4361 if (res < 0)
4362 {
4363 if (errno == EIO)
4364 {
4365 /* If we get EIO on a regset, do not try it again for
4366 this process mode. */
4367 disable_regset (regsets_info, regset);
4368 free (buf);
4369 continue;
4370 }
4371 else if (errno == ESRCH)
4372 {
4373 /* At this point, ESRCH should mean the process is
4374 already gone, in which case we simply ignore attempts
4375 to change its registers. See also the related
4376 comment in linux_resume_one_lwp. */
4377 free (buf);
4378 return 0;
4379 }
4380 else
4381 {
4382 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4383 }
4384 }
4385 else if (regset->type == GENERAL_REGS)
4386 saw_general_regs = 1;
4387 regset ++;
4388 free (buf);
4389 }
4390 if (saw_general_regs)
4391 return 0;
4392 else
4393 return 1;
4394 }
4395
4396 #else /* !HAVE_LINUX_REGSETS */
4397
4398 #define use_linux_regsets 0
4399 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4400 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4401
4402 #endif
4403
4404 /* Return 1 if register REGNO is supported by one of the regset ptrace
4405 calls or 0 if it has to be transferred individually. */
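/* (Illustrative arithmetic: REGNO == 10 tests bit 2 of bitmap byte 1,
   since index == 10 / 8 == 1 and mask == 1 << (10 % 8) == 0x04.) */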
4406
4407 static int
4408 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4409 {
4410 unsigned char mask = 1 << (regno % 8);
4411 size_t index = regno / 8;
4412
4413 return (use_linux_regsets
4414 && (regs_info->regset_bitmap == NULL
4415 || (regs_info->regset_bitmap[index] & mask) != 0));
4416 }
4417
4418 #ifdef HAVE_LINUX_USRREGS
4419
4420 int
4421 register_addr (const struct usrregs_info *usrregs, int regnum)
4422 {
4423 int addr;
4424
4425 if (regnum < 0 || regnum >= usrregs->num_regs)
4426 error ("Invalid register number %d.", regnum);
4427
4428 addr = usrregs->regmap[regnum];
4429
4430 return addr;
4431 }
4432
4433 /* Fetch one register. */
4434 static void
4435 fetch_register (const struct usrregs_info *usrregs,
4436 struct regcache *regcache, int regno)
4437 {
4438 CORE_ADDR regaddr;
4439 int i, size;
4440 char *buf;
4441 int pid;
4442
4443 if (regno >= usrregs->num_regs)
4444 return;
4445 if ((*the_low_target.cannot_fetch_register) (regno))
4446 return;
4447
4448 regaddr = register_addr (usrregs, regno);
4449 if (regaddr == -1)
4450 return;
4451
4452 size = ((register_size (regcache->tdesc, regno)
4453 + sizeof (PTRACE_XFER_TYPE) - 1)
4454 & -sizeof (PTRACE_XFER_TYPE));
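/* (Illustrative: with an 8-byte PTRACE_XFER_TYPE, a 10-byte register
   rounds up to size == (10 + 7) & ~7 == 16, i.e. two ptrace words.) */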
4455 buf = alloca (size);
4456
4457 pid = lwpid_of (current_inferior);
4458 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4459 {
4460 errno = 0;
4461 *(PTRACE_XFER_TYPE *) (buf + i) =
4462 ptrace (PTRACE_PEEKUSER, pid,
4463 /* Coerce to a uintptr_t first to avoid potential gcc warning
4464 of coercing an 8 byte integer to a 4 byte pointer. */
4465 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4466 regaddr += sizeof (PTRACE_XFER_TYPE);
4467 if (errno != 0)
4468 error ("reading register %d: %s", regno, strerror (errno));
4469 }
4470
4471 if (the_low_target.supply_ptrace_register)
4472 the_low_target.supply_ptrace_register (regcache, regno, buf);
4473 else
4474 supply_register (regcache, regno, buf);
4475 }
4476
4477 /* Store one register. */
4478 static void
4479 store_register (const struct usrregs_info *usrregs,
4480 struct regcache *regcache, int regno)
4481 {
4482 CORE_ADDR regaddr;
4483 int i, size;
4484 char *buf;
4485 int pid;
4486
4487 if (regno >= usrregs->num_regs)
4488 return;
4489 if ((*the_low_target.cannot_store_register) (regno))
4490 return;
4491
4492 regaddr = register_addr (usrregs, regno);
4493 if (regaddr == -1)
4494 return;
4495
4496 size = ((register_size (regcache->tdesc, regno)
4497 + sizeof (PTRACE_XFER_TYPE) - 1)
4498 & -sizeof (PTRACE_XFER_TYPE));
4499 buf = alloca (size);
4500 memset (buf, 0, size);
4501
4502 if (the_low_target.collect_ptrace_register)
4503 the_low_target.collect_ptrace_register (regcache, regno, buf);
4504 else
4505 collect_register (regcache, regno, buf);
4506
4507 pid = lwpid_of (current_inferior);
4508 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4509 {
4510 errno = 0;
4511 ptrace (PTRACE_POKEUSER, pid,
4512 /* Coerce to a uintptr_t first to avoid potential gcc warning
4513 about coercing an 8 byte integer to a 4 byte pointer. */
4514 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4515 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4516 if (errno != 0)
4517 {
4518 /* At this point, ESRCH should mean the process is
4519 already gone, in which case we simply ignore attempts
4520 to change its registers. See also the related
4521 comment in linux_resume_one_lwp. */
4522 if (errno == ESRCH)
4523 return;
4524
4525 if ((*the_low_target.cannot_store_register) (regno) == 0)
4526 error ("writing register %d: %s", regno, strerror (errno));
4527 }
4528 regaddr += sizeof (PTRACE_XFER_TYPE);
4529 }
4530 }
4531
4532 /* Fetch all registers, or just one, from the child process.
4533 If REGNO is -1, do this for all registers, skipping any that are
4534 assumed to have been retrieved by regsets_fetch_inferior_registers,
4535 unless ALL is non-zero.
4536 Otherwise, REGNO specifies which register (so we can save time). */
4537 static void
4538 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4539 struct regcache *regcache, int regno, int all)
4540 {
4541 struct usrregs_info *usr = regs_info->usrregs;
4542
4543 if (regno == -1)
4544 {
4545 for (regno = 0; regno < usr->num_regs; regno++)
4546 if (all || !linux_register_in_regsets (regs_info, regno))
4547 fetch_register (usr, regcache, regno);
4548 }
4549 else
4550 fetch_register (usr, regcache, regno);
4551 }
4552
4553 /* Store our register values back into the inferior.
4554 If REGNO is -1, do this for all registers, skipping any that are
4555 assumed to have been saved by regsets_store_inferior_registers,
4556 unless ALL is non-zero.
4557 Otherwise, REGNO specifies which register (so we can save time). */
4558 static void
4559 usr_store_inferior_registers (const struct regs_info *regs_info,
4560 struct regcache *regcache, int regno, int all)
4561 {
4562 struct usrregs_info *usr = regs_info->usrregs;
4563
4564 if (regno == -1)
4565 {
4566 for (regno = 0; regno < usr->num_regs; regno++)
4567 if (all || !linux_register_in_regsets (regs_info, regno))
4568 store_register (usr, regcache, regno);
4569 }
4570 else
4571 store_register (usr, regcache, regno);
4572 }
4573
4574 #else /* !HAVE_LINUX_USRREGS */
4575
4576 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4577 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4578
4579 #endif
4580
4581
4582 void
4583 linux_fetch_registers (struct regcache *regcache, int regno)
4584 {
4585 int use_regsets;
4586 int all = 0;
4587 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4588
4589 if (regno == -1)
4590 {
4591 if (the_low_target.fetch_register != NULL
4592 && regs_info->usrregs != NULL)
4593 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4594 (*the_low_target.fetch_register) (regcache, regno);
4595
4596 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4597 if (regs_info->usrregs != NULL)
4598 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4599 }
4600 else
4601 {
4602 if (the_low_target.fetch_register != NULL
4603 && (*the_low_target.fetch_register) (regcache, regno))
4604 return;
4605
4606 use_regsets = linux_register_in_regsets (regs_info, regno);
4607 if (use_regsets)
4608 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4609 regcache);
4610 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4611 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4612 }
4613 }
4614
4615 void
4616 linux_store_registers (struct regcache *regcache, int regno)
4617 {
4618 int use_regsets;
4619 int all = 0;
4620 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4621
4622 if (regno == -1)
4623 {
4624 all = regsets_store_inferior_registers (regs_info->regsets_info,
4625 regcache);
4626 if (regs_info->usrregs != NULL)
4627 usr_store_inferior_registers (regs_info, regcache, regno, all);
4628 }
4629 else
4630 {
4631 use_regsets = linux_register_in_regsets (regs_info, regno);
4632 if (use_regsets)
4633 all = regsets_store_inferior_registers (regs_info->regsets_info,
4634 regcache);
4635 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4636 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4637 }
4638 }
4639
4640
4641 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4642 to debugger memory starting at MYADDR. */
4643
4644 static int
4645 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4646 {
4647 int pid = lwpid_of (current_inferior);
4648 register PTRACE_XFER_TYPE *buffer;
4649 register CORE_ADDR addr;
4650 register int count;
4651 char filename[64];
4652 register int i;
4653 int ret;
4654 int fd;
4655
4656 /* Try using /proc. Don't bother for one word. */
4657 if (len >= 3 * sizeof (long))
4658 {
4659 int bytes;
4660
4661 /* We could keep this file open and cache it - possibly one per
4662 thread. That requires some juggling, but is even faster. */
4663 sprintf (filename, "/proc/%d/mem", pid);
4664 fd = open (filename, O_RDONLY | O_LARGEFILE);
4665 if (fd == -1)
4666 goto no_proc;
4667
4668 /* If pread64 is available, use it. It's faster if the kernel
4669 supports it (only one syscall), and it's 64-bit safe even on
4670 32-bit platforms (for instance, SPARC debugging a SPARC64
4671 application). */
4672 #ifdef HAVE_PREAD64
4673 bytes = pread64 (fd, myaddr, len, memaddr);
4674 #else
4675 bytes = -1;
4676 if (lseek (fd, memaddr, SEEK_SET) != -1)
4677 bytes = read (fd, myaddr, len);
4678 #endif
4679
4680 close (fd);
4681 if (bytes == len)
4682 return 0;
4683
4684 /* Some data was read; we'll try to get the rest with ptrace. */
4685 if (bytes > 0)
4686 {
4687 memaddr += bytes;
4688 myaddr += bytes;
4689 len -= bytes;
4690 }
4691 }
4692
4693 no_proc:
4694 /* Round starting address down to longword boundary. */
4695 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4696 /* Round ending address up; get number of longwords that makes. */
4697 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4698 / sizeof (PTRACE_XFER_TYPE));
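/* (Illustrative: memaddr == 0x1003 and len == 6 with 4-byte words give
   addr == 0x1000 and count == ((0x1009 - 0x1000) + 3) / 4 == 3, so
   three peeks cover the unaligned span.) */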
4699 /* Allocate buffer of that many longwords. */
4700 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4701
4702 /* Read all the longwords. */
4703 errno = 0;
4704 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4705 {
4706 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4707 about coercing an 8 byte integer to a 4 byte pointer. */
4708 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4709 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4710 (PTRACE_TYPE_ARG4) 0);
4711 if (errno)
4712 break;
4713 }
4714 ret = errno;
4715
4716 /* Copy appropriate bytes out of the buffer. */
4717 if (i > 0)
4718 {
4719 i *= sizeof (PTRACE_XFER_TYPE);
4720 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4721 memcpy (myaddr,
4722 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4723 i < len ? i : len);
4724 }
4725
4726 return ret;
4727 }
4728
4729 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4730 memory at MEMADDR. On failure (cannot write to the inferior)
4731 returns the value of errno. Always succeeds if LEN is zero. */
4732
4733 static int
4734 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4735 {
4736 register int i;
4737 /* Round starting address down to longword boundary. */
4738 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4739 /* Round ending address up; get number of longwords that makes. */
4740 register int count
4741 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4742 / sizeof (PTRACE_XFER_TYPE);
4743
4744 /* Allocate buffer of that many longwords. */
4745 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4746 alloca (count * sizeof (PTRACE_XFER_TYPE));
4747
4748 int pid = lwpid_of (current_inferior);
4749
4750 if (len == 0)
4751 {
4752 /* Zero length write always succeeds. */
4753 return 0;
4754 }
4755
4756 if (debug_threads)
4757 {
4758 /* Dump up to four bytes. */
4759 unsigned int val = * (unsigned int *) myaddr;
4760 if (len == 1)
4761 val = val & 0xff;
4762 else if (len == 2)
4763 val = val & 0xffff;
4764 else if (len == 3)
4765 val = val & 0xffffff;
4766 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4767 val, (long)memaddr);
4768 }
4769
4770 /* Fill start and end extra bytes of buffer with existing memory data. */
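/* (Illustrative: writing len == 3 bytes at memaddr == 0x1001 with
   4-byte words touches only the word at 0x1000; we peek it first,
   overlay bytes 1..3 from myaddr, then poke the whole word back so
   byte 0 keeps its previous value.) */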
4771
4772 errno = 0;
4773 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4774 about coercing an 8 byte integer to a 4 byte pointer. */
4775 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4776 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4777 (PTRACE_TYPE_ARG4) 0);
4778 if (errno)
4779 return errno;
4780
4781 if (count > 1)
4782 {
4783 errno = 0;
4784 buffer[count - 1]
4785 = ptrace (PTRACE_PEEKTEXT, pid,
4786 /* Coerce to a uintptr_t first to avoid potential gcc warning
4787 about coercing an 8 byte integer to a 4 byte pointer. */
4788 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4789 * sizeof (PTRACE_XFER_TYPE)),
4790 (PTRACE_TYPE_ARG4) 0);
4791 if (errno)
4792 return errno;
4793 }
4794
4795 /* Copy data to be written over corresponding part of buffer. */
4796
4797 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4798 myaddr, len);
4799
4800 /* Write the entire buffer. */
4801
4802 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4803 {
4804 errno = 0;
4805 ptrace (PTRACE_POKETEXT, pid,
4806 /* Coerce to a uintptr_t first to avoid potential gcc warning
4807 about coercing an 8 byte integer to a 4 byte pointer. */
4808 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4809 (PTRACE_TYPE_ARG4) buffer[i]);
4810 if (errno)
4811 return errno;
4812 }
4813
4814 return 0;
4815 }
4816
4817 static void
4818 linux_look_up_symbols (void)
4819 {
4820 #ifdef USE_THREAD_DB
4821 struct process_info *proc = current_process ();
4822
4823 if (proc->private->thread_db != NULL)
4824 return;
4825
4826 /* If the kernel supports tracing clones, then we don't need to
4827 use the magic thread event breakpoint to learn about
4828 threads. */
4829 thread_db_init (!linux_supports_traceclone ());
4830 #endif
4831 }
4832
4833 static void
4834 linux_request_interrupt (void)
4835 {
4836 extern unsigned long signal_pid;
4837
4838 if (!ptid_equal (cont_thread, null_ptid)
4839 && !ptid_equal (cont_thread, minus_one_ptid))
4840 {
4841 int lwpid;
4842
4843 lwpid = lwpid_of (current_inferior);
4844 kill_lwp (lwpid, SIGINT);
4845 }
4846 else
4847 kill_lwp (signal_pid, SIGINT);
4848 }
4849
4850 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4851 to debugger memory starting at MYADDR. */
4852
4853 static int
4854 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4855 {
4856 char filename[PATH_MAX];
4857 int fd, n;
4858 int pid = lwpid_of (current_inferior);
4859
4860 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4861
4862 fd = open (filename, O_RDONLY);
4863 if (fd < 0)
4864 return -1;
4865
4866 if (offset != (CORE_ADDR) 0
4867 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4868 n = -1;
4869 else
4870 n = read (fd, myaddr, len);
4871
4872 close (fd);
4873
4874 return n;
4875 }
4876
4877 /* These breakpoint and watchpoint related wrapper functions simply
4878 pass on the function call if the target has registered a
4879 corresponding function. */
4880
4881 static int
4882 linux_supports_z_point_type (char z_type)
4883 {
4884 return (the_low_target.supports_z_point_type != NULL
4885 && the_low_target.supports_z_point_type (z_type));
4886 }
4887
4888 static int
4889 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
4890 int size, struct raw_breakpoint *bp)
4891 {
4892 if (the_low_target.insert_point != NULL)
4893 return the_low_target.insert_point (type, addr, size, bp);
4894 else
4895 /* Unsupported (see target.h). */
4896 return 1;
4897 }
4898
4899 static int
4900 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
4901 int size, struct raw_breakpoint *bp)
4902 {
4903 if (the_low_target.remove_point != NULL)
4904 return the_low_target.remove_point (type, addr, size, bp);
4905 else
4906 /* Unsupported (see target.h). */
4907 return 1;
4908 }
4909
4910 static int
4911 linux_stopped_by_watchpoint (void)
4912 {
4913 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4914
4915 return lwp->stopped_by_watchpoint;
4916 }
4917
4918 static CORE_ADDR
4919 linux_stopped_data_address (void)
4920 {
4921 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4922
4923 return lwp->stopped_data_address;
4924 }
4925
4926 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4927 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4928 && defined(PT_TEXT_END_ADDR)
4929
4930 /* This is only used for targets that define PT_TEXT_ADDR,
4931 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4932 the target has different ways of acquiring this information, like
4933 loadmaps. */
4934
4935 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4936 to tell gdb about. */
4937
4938 static int
4939 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4940 {
4941 unsigned long text, text_end, data;
4942 int pid = lwpid_of (get_thread_lwp (current_inferior));
4943
4944 errno = 0;
4945
4946 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4947 (PTRACE_TYPE_ARG4) 0);
4948 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4949 (PTRACE_TYPE_ARG4) 0);
4950 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4951 (PTRACE_TYPE_ARG4) 0);
4952
4953 if (errno == 0)
4954 {
4955 /* Both text and data offsets produced at compile-time (and so
4956 used by gdb) are relative to the beginning of the program,
4957 with the data segment immediately following the text segment.
4958 However, the actual runtime layout in memory may put the data
4959 somewhere else, so when we send gdb a data base-address, we
4960 use the real data base address and subtract the compile-time
4961 data base-address from it (which is just the length of the
4962 text segment). BSS immediately follows data in both
4963 cases. */
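/* (Illustrative numbers: text == 0x10000000, text_end == 0x10008000
   and data == 0x20000000 yield *data_p == 0x20000000 - 0x8000, so
   gdb's compile-time data offset plus *data_p lands on the runtime
   data segment.) */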
4964 *text_p = text;
4965 *data_p = data - (text_end - text);
4966
4967 return 1;
4968 }
4969 return 0;
4970 }
4971 #endif
4972
4973 static int
4974 linux_qxfer_osdata (const char *annex,
4975 unsigned char *readbuf, unsigned const char *writebuf,
4976 CORE_ADDR offset, int len)
4977 {
4978 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4979 }
4980
4981 /* Convert a native/host siginfo object into/from the siginfo in the
4982 layout of the inferior's architecture. */
4983
4984 static void
4985 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4986 {
4987 int done = 0;
4988
4989 if (the_low_target.siginfo_fixup != NULL)
4990 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4991
4992 /* If there was no callback, or the callback didn't do anything,
4993 then just do a straight memcpy. */
4994 if (!done)
4995 {
4996 if (direction == 1)
4997 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4998 else
4999 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5000 }
5001 }
5002
5003 static int
5004 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5005 unsigned const char *writebuf, CORE_ADDR offset, int len)
5006 {
5007 int pid;
5008 siginfo_t siginfo;
5009 char inf_siginfo[sizeof (siginfo_t)];
5010
5011 if (current_inferior == NULL)
5012 return -1;
5013
5014 pid = lwpid_of (current_inferior);
5015
5016 if (debug_threads)
5017 debug_printf ("%s siginfo for lwp %d.\n",
5018 readbuf != NULL ? "Reading" : "Writing",
5019 pid);
5020
5021 if (offset >= sizeof (siginfo))
5022 return -1;
5023
5024 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5025 return -1;
5026
5027 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5028 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5029 inferior with a 64-bit GDBSERVER should look the same as debugging it
5030 with a 32-bit GDBSERVER, we need to convert it. */
5031 siginfo_fixup (&siginfo, inf_siginfo, 0);
5032
5033 if (offset + len > sizeof (siginfo))
5034 len = sizeof (siginfo) - offset;
5035
5036 if (readbuf != NULL)
5037 memcpy (readbuf, inf_siginfo + offset, len);
5038 else
5039 {
5040 memcpy (inf_siginfo + offset, writebuf, len);
5041
5042 /* Convert back to ptrace layout before flushing it out. */
5043 siginfo_fixup (&siginfo, inf_siginfo, 1);
5044
5045 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5046 return -1;
5047 }
5048
5049 return len;
5050 }
5051
5052 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5053 it lets us notice when children change state; and it acts as the
5054 handler for the sigsuspend in my_waitpid. */
5055
5056 static void
5057 sigchld_handler (int signo)
5058 {
5059 int old_errno = errno;
5060
5061 if (debug_threads)
5062 {
5063 do
5064 {
5065 /* fprintf is not async-signal-safe, so call write
5066 directly. */
5067 if (write (2, "sigchld_handler\n",
5068 sizeof ("sigchld_handler\n") - 1) < 0)
5069 break; /* just ignore */
5070 } while (0);
5071 }
5072
5073 if (target_is_async_p ())
5074 async_file_mark (); /* trigger a linux_wait */
5075
5076 errno = old_errno;
5077 }
5078
5079 static int
5080 linux_supports_non_stop (void)
5081 {
5082 return 1;
5083 }
5084
5085 static int
5086 linux_async (int enable)
5087 {
5088 int previous = target_is_async_p ();
5089
5090 if (debug_threads)
5091 debug_printf ("linux_async (%d), previous=%d\n",
5092 enable, previous);
5093
5094 if (previous != enable)
5095 {
5096 sigset_t mask;
5097 sigemptyset (&mask);
5098 sigaddset (&mask, SIGCHLD);
5099
5100 sigprocmask (SIG_BLOCK, &mask, NULL);
5101
5102 if (enable)
5103 {
5104 if (pipe (linux_event_pipe) == -1)
5105 fatal ("creating event pipe failed.");
5106
5107 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5108 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5109
5110 /* Register the event loop handler. */
5111 add_file_handler (linux_event_pipe[0],
5112 handle_target_event, NULL);
5113
5114 /* Always trigger a linux_wait. */
5115 async_file_mark ();
5116 }
5117 else
5118 {
5119 delete_file_handler (linux_event_pipe[0]);
5120
5121 close (linux_event_pipe[0]);
5122 close (linux_event_pipe[1]);
5123 linux_event_pipe[0] = -1;
5124 linux_event_pipe[1] = -1;
5125 }
5126
5127 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5128 }
5129
5130 return previous;
5131 }
5132
5133 static int
5134 linux_start_non_stop (int nonstop)
5135 {
5136 /* Register or unregister from event-loop accordingly. */
5137 linux_async (nonstop);
5138 return 0;
5139 }
5140
5141 static int
5142 linux_supports_multi_process (void)
5143 {
5144 return 1;
5145 }
5146
5147 static int
5148 linux_supports_disable_randomization (void)
5149 {
5150 #ifdef HAVE_PERSONALITY
5151 return 1;
5152 #else
5153 return 0;
5154 #endif
5155 }
5156
5157 static int
5158 linux_supports_agent (void)
5159 {
5160 return 1;
5161 }
5162
5163 static int
5164 linux_supports_range_stepping (void)
5165 {
5166 if (*the_low_target.supports_range_stepping == NULL)
5167 return 0;
5168
5169 return (*the_low_target.supports_range_stepping) ();
5170 }
5171
5172 /* Enumerate spufs IDs for process PID. */
5173 static int
5174 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5175 {
5176 int pos = 0;
5177 int written = 0;
5178 char path[128];
5179 DIR *dir;
5180 struct dirent *entry;
5181
5182 sprintf (path, "/proc/%ld/fd", pid);
5183 dir = opendir (path);
5184 if (!dir)
5185 return -1;
5186
5187 rewinddir (dir);
5188 while ((entry = readdir (dir)) != NULL)
5189 {
5190 struct stat st;
5191 struct statfs stfs;
5192 int fd;
5193
5194 fd = atoi (entry->d_name);
5195 if (!fd)
5196 continue;
5197
5198 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5199 if (stat (path, &st) != 0)
5200 continue;
5201 if (!S_ISDIR (st.st_mode))
5202 continue;
5203
5204 if (statfs (path, &stfs) != 0)
5205 continue;
5206 if (stfs.f_type != SPUFS_MAGIC)
5207 continue;
5208
5209 if (pos >= offset && pos + 4 <= offset + len)
5210 {
5211 *(unsigned int *)(buf + pos - offset) = fd;
5212 written += 4;
5213 }
5214 pos += 4;
5215 }
5216
5217 closedir (dir);
5218 return written;
5219 }
5220
5221 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5222 object type, using the /proc file system. */
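/* (Usage sketch, partly assumed: an empty annex enumerates spufs
   context IDs via spu_enumerate_spu_ids; otherwise the annex is taken
   as a path fragment under /proc/<pid>/fd/, so an annex of, say,
   "7/regs" maps to /proc/<pid>/fd/7/regs per the sprintf below.) */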
5223 static int
5224 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5225 unsigned const char *writebuf,
5226 CORE_ADDR offset, int len)
5227 {
5228 long pid = lwpid_of (current_inferior);
5229 char buf[128];
5230 int fd = 0;
5231 int ret = 0;
5232
5233 if (!writebuf && !readbuf)
5234 return -1;
5235
5236 if (!*annex)
5237 {
5238 if (!readbuf)
5239 return -1;
5240 else
5241 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5242 }
5243
5244 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5245 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5246 if (fd <= 0)
5247 return -1;
5248
5249 if (offset != 0
5250 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5251 {
5252 close (fd);
5253 return 0;
5254 }
5255
5256 if (writebuf)
5257 ret = write (fd, writebuf, (size_t) len);
5258 else
5259 ret = read (fd, readbuf, (size_t) len);
5260
5261 close (fd);
5262 return ret;
5263 }
5264
5265 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5266 struct target_loadseg
5267 {
5268 /* Core address to which the segment is mapped. */
5269 Elf32_Addr addr;
5270 /* VMA recorded in the program header. */
5271 Elf32_Addr p_vaddr;
5272 /* Size of this segment in memory. */
5273 Elf32_Word p_memsz;
5274 };
5275
5276 # if defined PT_GETDSBT
5277 struct target_loadmap
5278 {
5279 /* Protocol version number, must be zero. */
5280 Elf32_Word version;
5281 /* Pointer to the DSBT table, its size, and the DSBT index. */
5282 unsigned *dsbt_table;
5283 unsigned dsbt_size, dsbt_index;
5284 /* Number of segments in this map. */
5285 Elf32_Word nsegs;
5286 /* The actual memory map. */
5287 struct target_loadseg segs[/*nsegs*/];
5288 };
5289 # define LINUX_LOADMAP PT_GETDSBT
5290 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5291 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5292 # else
5293 struct target_loadmap
5294 {
5295 /* Protocol version number, must be zero. */
5296 Elf32_Half version;
5297 /* Number of segments in this map. */
5298 Elf32_Half nsegs;
5299 /* The actual memory map. */
5300 struct target_loadseg segs[/*nsegs*/];
5301 };
5302 # define LINUX_LOADMAP PTRACE_GETFDPIC
5303 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5304 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5305 # endif
5306
5307 static int
5308 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5309 unsigned char *myaddr, unsigned int len)
5310 {
5311 int pid = lwpid_of (current_inferior);
5312 int addr = -1;
5313 struct target_loadmap *data = NULL;
5314 unsigned int actual_length, copy_length;
5315
5316 if (strcmp (annex, "exec") == 0)
5317 addr = (int) LINUX_LOADMAP_EXEC;
5318 else if (strcmp (annex, "interp") == 0)
5319 addr = (int) LINUX_LOADMAP_INTERP;
5320 else
5321 return -1;
5322
5323 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5324 return -1;
5325
5326 if (data == NULL)
5327 return -1;
5328
5329 actual_length = sizeof (struct target_loadmap)
5330 + sizeof (struct target_loadseg) * data->nsegs;
5331
5332 if (offset < 0 || offset > actual_length)
5333 return -1;
5334
5335 copy_length = actual_length - offset < len ? actual_length - offset : len;
5336 memcpy (myaddr, (char *) data + offset, copy_length);
5337 return copy_length;
5338 }
5339 #else
5340 # define linux_read_loadmap NULL
5341 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5342
5343 static void
5344 linux_process_qsupported (const char *query)
5345 {
5346 if (the_low_target.process_qsupported != NULL)
5347 the_low_target.process_qsupported (query);
5348 }
5349
5350 static int
5351 linux_supports_tracepoints (void)
5352 {
5353 if (*the_low_target.supports_tracepoints == NULL)
5354 return 0;
5355
5356 return (*the_low_target.supports_tracepoints) ();
5357 }
5358
5359 static CORE_ADDR
5360 linux_read_pc (struct regcache *regcache)
5361 {
5362 if (the_low_target.get_pc == NULL)
5363 return 0;
5364
5365 return (*the_low_target.get_pc) (regcache);
5366 }
5367
5368 static void
5369 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5370 {
5371 gdb_assert (the_low_target.set_pc != NULL);
5372
5373 (*the_low_target.set_pc) (regcache, pc);
5374 }
5375
5376 static int
5377 linux_thread_stopped (struct thread_info *thread)
5378 {
5379 return get_thread_lwp (thread)->stopped;
5380 }
5381
5382 /* This exposes stop-all-threads functionality to other modules. */
5383
5384 static void
5385 linux_pause_all (int freeze)
5386 {
5387 stop_all_lwps (freeze, NULL);
5388 }
5389
5390 /* This exposes unstop-all-threads functionality to other gdbserver
5391 modules. */
5392
5393 static void
5394 linux_unpause_all (int unfreeze)
5395 {
5396 unstop_all_lwps (unfreeze, NULL);
5397 }
5398
5399 static int
5400 linux_prepare_to_access_memory (void)
5401 {
5402 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5403 running LWP. */
5404 if (non_stop)
5405 linux_pause_all (1);
5406 return 0;
5407 }
5408
5409 static void
5410 linux_done_accessing_memory (void)
5411 {
5412 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5413 running LWP. */
5414 if (non_stop)
5415 linux_unpause_all (1);
5416 }
5417
5418 static int
5419 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5420 CORE_ADDR collector,
5421 CORE_ADDR lockaddr,
5422 ULONGEST orig_size,
5423 CORE_ADDR *jump_entry,
5424 CORE_ADDR *trampoline,
5425 ULONGEST *trampoline_size,
5426 unsigned char *jjump_pad_insn,
5427 ULONGEST *jjump_pad_insn_size,
5428 CORE_ADDR *adjusted_insn_addr,
5429 CORE_ADDR *adjusted_insn_addr_end,
5430 char *err)
5431 {
5432 return (*the_low_target.install_fast_tracepoint_jump_pad)
5433 (tpoint, tpaddr, collector, lockaddr, orig_size,
5434 jump_entry, trampoline, trampoline_size,
5435 jjump_pad_insn, jjump_pad_insn_size,
5436 adjusted_insn_addr, adjusted_insn_addr_end,
5437 err);
5438 }
5439
5440 static struct emit_ops *
5441 linux_emit_ops (void)
5442 {
5443 if (the_low_target.emit_ops != NULL)
5444 return (*the_low_target.emit_ops) ();
5445 else
5446 return NULL;
5447 }
5448
5449 static int
5450 linux_get_min_fast_tracepoint_insn_len (void)
5451 {
5452 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5453 }
5454
5455 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5456
5457 static int
5458 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5459 CORE_ADDR *phdr_memaddr, int *num_phdr)
5460 {
5461 char filename[PATH_MAX];
5462 int fd;
5463 const int auxv_size = is_elf64
5464 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5465 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5466
5467 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5468
5469 fd = open (filename, O_RDONLY);
5470 if (fd < 0)
5471 return 1;
5472
5473 *phdr_memaddr = 0;
5474 *num_phdr = 0;
5475 while (read (fd, buf, auxv_size) == auxv_size
5476 && (*phdr_memaddr == 0 || *num_phdr == 0))
5477 {
5478 if (is_elf64)
5479 {
5480 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5481
5482 switch (aux->a_type)
5483 {
5484 case AT_PHDR:
5485 *phdr_memaddr = aux->a_un.a_val;
5486 break;
5487 case AT_PHNUM:
5488 *num_phdr = aux->a_un.a_val;
5489 break;
5490 }
5491 }
5492 else
5493 {
5494 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5495
5496 switch (aux->a_type)
5497 {
5498 case AT_PHDR:
5499 *phdr_memaddr = aux->a_un.a_val;
5500 break;
5501 case AT_PHNUM:
5502 *num_phdr = aux->a_un.a_val;
5503 break;
5504 }
5505 }
5506 }
5507
5508 close (fd);
5509
5510 if (*phdr_memaddr == 0 || *num_phdr == 0)
5511 {
5512 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5513 "phdr_memaddr = %ld, phdr_num = %d",
5514 (long) *phdr_memaddr, *num_phdr);
5515 return 2;
5516 }
5517
5518 return 0;
5519 }
5520
5521 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5522
5523 static CORE_ADDR
5524 get_dynamic (const int pid, const int is_elf64)
5525 {
5526 CORE_ADDR phdr_memaddr, relocation;
5527 int num_phdr, i;
5528 unsigned char *phdr_buf;
5529 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5530
5531 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5532 return 0;
5533
5534 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5535 phdr_buf = alloca (num_phdr * phdr_size);
5536
5537 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5538 return 0;
5539
5540 /* Compute relocation: it is expected to be 0 for "regular" executables,
5541 non-zero for PIE ones. */
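/* (Illustrative: a PIE whose PT_PHDR records p_vaddr 0x40 but whose
   program headers sit at 0x555555554040 at runtime yields relocation
   == 0x555555554000, which is then added to PT_DYNAMIC's p_vaddr.) */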
5542 relocation = -1;
5543 for (i = 0; relocation == -1 && i < num_phdr; i++)
5544 if (is_elf64)
5545 {
5546 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5547
5548 if (p->p_type == PT_PHDR)
5549 relocation = phdr_memaddr - p->p_vaddr;
5550 }
5551 else
5552 {
5553 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5554
5555 if (p->p_type == PT_PHDR)
5556 relocation = phdr_memaddr - p->p_vaddr;
5557 }
5558
5559 if (relocation == -1)
5560 {
5561 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5562 any real-world executable, including PIE executables, always has
5563 PT_PHDR present. PT_PHDR is not present in some shared libraries or
5564 in fpc (Free Pascal 2.4) binaries, but neither of those needs or
5565 provides DT_DEBUG anyway (fpc binaries are statically linked).
5566
5567 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
5568
5569 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
5570
5571 return 0;
5572 }
5573
5574 for (i = 0; i < num_phdr; i++)
5575 {
5576 if (is_elf64)
5577 {
5578 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5579
5580 if (p->p_type == PT_DYNAMIC)
5581 return p->p_vaddr + relocation;
5582 }
5583 else
5584 {
5585 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5586
5587 if (p->p_type == PT_DYNAMIC)
5588 return p->p_vaddr + relocation;
5589 }
5590 }
5591
5592 return 0;
5593 }
5594
5595 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5596 can be 0 if the inferior does not yet have the library list initialized.
5597 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5598 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5599
5600 static CORE_ADDR
5601 get_r_debug (const int pid, const int is_elf64)
5602 {
5603 CORE_ADDR dynamic_memaddr;
5604 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5605 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5606 CORE_ADDR map = -1;
5607
5608 dynamic_memaddr = get_dynamic (pid, is_elf64);
5609 if (dynamic_memaddr == 0)
5610 return map;
5611
5612 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5613 {
5614 if (is_elf64)
5615 {
5616 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5617 #ifdef DT_MIPS_RLD_MAP
5618 union
5619 {
5620 Elf64_Xword map;
5621 unsigned char buf[sizeof (Elf64_Xword)];
5622 }
5623 rld_map;
5624
5625 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5626 {
5627 if (linux_read_memory (dyn->d_un.d_val,
5628 rld_map.buf, sizeof (rld_map.buf)) == 0)
5629 return rld_map.map;
5630 else
5631 break;
5632 }
5633 #endif /* DT_MIPS_RLD_MAP */
5634
5635 if (dyn->d_tag == DT_DEBUG && map == -1)
5636 map = dyn->d_un.d_val;
5637
5638 if (dyn->d_tag == DT_NULL)
5639 break;
5640 }
5641 else
5642 {
5643 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5644 #ifdef DT_MIPS_RLD_MAP
5645 union
5646 {
5647 Elf32_Word map;
5648 unsigned char buf[sizeof (Elf32_Word)];
5649 }
5650 rld_map;
5651
5652 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5653 {
5654 if (linux_read_memory (dyn->d_un.d_val,
5655 rld_map.buf, sizeof (rld_map.buf)) == 0)
5656 return rld_map.map;
5657 else
5658 break;
5659 }
5660 #endif /* DT_MIPS_RLD_MAP */
5661
5662 if (dyn->d_tag == DT_DEBUG && map == -1)
5663 map = dyn->d_un.d_val;
5664
5665 if (dyn->d_tag == DT_NULL)
5666 break;
5667 }
5668
5669 dynamic_memaddr += dyn_size;
5670 }
5671
5672 return map;
5673 }
5674
5675 /* Read one pointer from MEMADDR in the inferior. */
5676
5677 static int
5678 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5679 {
5680 int ret;
5681
5682 /* Go through a union so this works on either big or little endian
5683 hosts, when the inferior's pointer size is smaller than the size
5684 of CORE_ADDR. It is assumed the inferior's endianness is the
5685 same as the superior's. */
5686 union
5687 {
5688 CORE_ADDR core_addr;
5689 unsigned int ui;
5690 unsigned char uc;
5691 } addr;
5692
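  /* For example (illustrative): on a big-endian 64-bit host reading a
     4-byte inferior pointer, the bytes land at the start of the union,
     which is where addr.ui lives but not where the low-order bytes of
     addr.core_addr live; hence the size-based member selection below.  */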
5693 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5694 if (ret == 0)
5695 {
5696 if (ptr_size == sizeof (CORE_ADDR))
5697 *ptr = addr.core_addr;
5698 else if (ptr_size == sizeof (unsigned int))
5699 *ptr = addr.ui;
5700 else
5701 gdb_assert_not_reached ("unhandled pointer size");
5702 }
5703 return ret;
5704 }
5705
5706 struct link_map_offsets
5707 {
5708   /* Offset of r_debug.r_version.  */
5709   int r_version_offset;
5710
5711   /* Offset of r_debug.r_map.  */
5712   int r_map_offset;
5713
5714 /* Offset to l_addr field in struct link_map. */
5715 int l_addr_offset;
5716
5717 /* Offset to l_name field in struct link_map. */
5718 int l_name_offset;
5719
5720 /* Offset to l_ld field in struct link_map. */
5721 int l_ld_offset;
5722
5723 /* Offset to l_next field in struct link_map. */
5724 int l_next_offset;
5725
5726 /* Offset to l_prev field in struct link_map. */
5727 int l_prev_offset;
5728 };
5729
5730 /* Construct qXfer:libraries-svr4:read reply. */
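/* The annex has the form "start=HEXADDR;prev=HEXADDR;", and the reply
   is an XML document along these lines (the addresses here are
   placeholders, shown for illustration only):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
       <library name="/lib/libc.so.6" lm="0x7ffff7ffe4a0"
                l_addr="0x7ffff7bc4000" l_ld="0x7ffff7f8bba0"/>
     </library-list-svr4>  */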
5731
5732 static int
5733 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5734 unsigned const char *writebuf,
5735 CORE_ADDR offset, int len)
5736 {
5737 char *document;
5738 unsigned document_len;
5739 struct process_info_private *const priv = current_process ()->private;
5740 char filename[PATH_MAX];
5741 int pid, is_elf64;
5742
5743 static const struct link_map_offsets lmo_32bit_offsets =
5744 {
5745 0, /* r_version offset. */
5746 4, /* r_debug.r_map offset. */
5747 0, /* l_addr offset in link_map. */
5748 4, /* l_name offset in link_map. */
5749 8, /* l_ld offset in link_map. */
5750 12, /* l_next offset in link_map. */
5751 16 /* l_prev offset in link_map. */
5752 };
5753
5754 static const struct link_map_offsets lmo_64bit_offsets =
5755 {
5756 0, /* r_version offset. */
5757 8, /* r_debug.r_map offset. */
5758 0, /* l_addr offset in link_map. */
5759 8, /* l_name offset in link_map. */
5760 16, /* l_ld offset in link_map. */
5761 24, /* l_next offset in link_map. */
5762 32 /* l_prev offset in link_map. */
5763 };
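  /* These tables mirror the layout of the SVR4 `struct r_debug' and
     `struct link_map' as laid out by the runtime linker for ILP32 and
     LP64 targets respectively; only the fields gdbserver reads are
     described.  */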
5764 const struct link_map_offsets *lmo;
5765 unsigned int machine;
5766 int ptr_size;
5767 CORE_ADDR lm_addr = 0, lm_prev = 0;
5768 int allocated = 1024;
5769 char *p;
5770 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5771 int header_done = 0;
5772
5773 if (writebuf != NULL)
5774 return -2;
5775 if (readbuf == NULL)
5776 return -1;
5777
5778 pid = lwpid_of (current_inferior);
5779 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5780 is_elf64 = elf_64_file_p (filename, &machine);
5781 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5782 ptr_size = is_elf64 ? 8 : 4;
5783
5784 while (annex[0] != '\0')
5785 {
5786 const char *sep;
5787 CORE_ADDR *addrp;
5788 int len;
5789
5790 sep = strchr (annex, '=');
5791 if (sep == NULL)
5792 break;
5793
5794 len = sep - annex;
5795 if (len == 5 && strncmp (annex, "start", 5) == 0)
5796 addrp = &lm_addr;
5797 else if (len == 4 && strncmp (annex, "prev", 4) == 0)
5798 addrp = &lm_prev;
5799 else
5800 {
5801 annex = strchr (sep, ';');
5802 if (annex == NULL)
5803 break;
5804 annex++;
5805 continue;
5806 }
5807
5808 annex = decode_address_to_semicolon (addrp, sep + 1);
5809 }
5810
5811 if (lm_addr == 0)
5812 {
5813 int r_version = 0;
5814
5815 if (priv->r_debug == 0)
5816 priv->r_debug = get_r_debug (pid, is_elf64);
5817
5818       /* We failed to find DT_DEBUG.  This situation will not change
5819          for this inferior, so do not retry.  Report it to GDB as
5820          E01; see GDB's solib-svr4.c for the reasons.  */
5821 if (priv->r_debug == (CORE_ADDR) -1)
5822 return -1;
5823
5824 if (priv->r_debug != 0)
5825 {
5826 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5827 (unsigned char *) &r_version,
5828 sizeof (r_version)) != 0
5829 || r_version != 1)
5830 {
5831 warning ("unexpected r_debug version %d", r_version);
5832 }
5833 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5834 &lm_addr, ptr_size) != 0)
5835 {
5836 warning ("unable to read r_map from 0x%lx",
5837 (long) priv->r_debug + lmo->r_map_offset);
5838 }
5839 }
5840 }
5841
5842 document = xmalloc (allocated);
5843 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5844 p = document + strlen (document);
5845
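  /* Walk the inferior's link map, reading the struct link_map fields of
     each entry; the walk stops at the first entry that cannot be read or
     whose back pointer is inconsistent with the previous entry.  */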
5846 while (lm_addr
5847 && read_one_ptr (lm_addr + lmo->l_name_offset,
5848 &l_name, ptr_size) == 0
5849 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5850 &l_addr, ptr_size) == 0
5851 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5852 &l_ld, ptr_size) == 0
5853 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5854 &l_prev, ptr_size) == 0
5855 && read_one_ptr (lm_addr + lmo->l_next_offset,
5856 &l_next, ptr_size) == 0)
5857 {
5858 unsigned char libname[PATH_MAX];
5859
5860 if (lm_prev != l_prev)
5861 {
5862 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5863 (long) lm_prev, (long) l_prev);
5864 break;
5865 }
5866
5867       /* Ignore the first entry even if it has a valid name, as the first
5868          entry corresponds to the main executable.  The first entry should
5869          not be skipped if the dynamic loader was loaded late by a static
5870          executable (see the solib-svr4.c parameter ignore_first).  But in
5871          that case the main executable has no PT_DYNAMIC, and this function
5872          has already returned above because get_r_debug failed.  */
5873 if (lm_prev == 0)
5874 {
5875 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5876 p = p + strlen (p);
5877 }
5878 else
5879 {
5880 /* Not checking for error because reading may stop before
5881 we've got PATH_MAX worth of characters. */
5882 libname[0] = '\0';
5883 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5884 libname[sizeof (libname) - 1] = '\0';
5885 if (libname[0] != '\0')
5886 {
5887 /* 6x the size for xml_escape_text below. */
5888 size_t len = 6 * strlen ((char *) libname);
5889 char *name;
5890
5891 if (!header_done)
5892 {
5893 /* Terminate `<library-list-svr4'. */
5894 *p++ = '>';
5895 header_done = 1;
5896 }
5897
5898 while (allocated < p - document + len + 200)
5899 {
5900 /* Expand to guarantee sufficient storage. */
5901 uintptr_t document_len = p - document;
5902
5903 document = xrealloc (document, 2 * allocated);
5904 allocated *= 2;
5905 p = document + document_len;
5906 }
5907
5908 name = xml_escape_text ((char *) libname);
5909 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5910 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5911 name, (unsigned long) lm_addr,
5912 (unsigned long) l_addr, (unsigned long) l_ld);
5913 free (name);
5914 }
5915 }
5916
5917 lm_prev = lm_addr;
5918 lm_addr = l_next;
5919 }
5920
5921 if (!header_done)
5922 {
5923 /* Empty list; terminate `<library-list-svr4'. */
5924 strcpy (p, "/>");
5925 }
5926 else
5927 strcpy (p, "</library-list-svr4>");
5928
5929 document_len = strlen (document);
5930 if (offset < document_len)
5931 document_len -= offset;
5932 else
5933 document_len = 0;
5934 if (len > document_len)
5935 len = document_len;
5936
5937 memcpy (readbuf, document + offset, len);
5938 xfree (document);
5939
5940 return len;
5941 }
5942
5943 #ifdef HAVE_LINUX_BTRACE
5944
5945 /* See to_enable_btrace target method. */
5946
5947 static struct btrace_target_info *
5948 linux_low_enable_btrace (ptid_t ptid)
5949 {
5950 struct btrace_target_info *tinfo;
5951
5952 tinfo = linux_enable_btrace (ptid);
5953
5954 if (tinfo != NULL)
5955 {
5956 struct thread_info *thread = find_thread_ptid (ptid);
5957 struct regcache *regcache = get_thread_regcache (thread, 0);
5958
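      /* Derive the inferior's pointer width, in bits, from the size of
         its first register.  */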
5959 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
5960 }
5961
5962 return tinfo;
5963 }
5964
5965 /* See to_disable_btrace target method. */
5966
5967 static int
5968 linux_low_disable_btrace (struct btrace_target_info *tinfo)
5969 {
5970 enum btrace_error err;
5971
5972 err = linux_disable_btrace (tinfo);
5973 return (err == BTRACE_ERR_NONE ? 0 : -1);
5974 }
5975
5976 /* See to_read_btrace target method. */
5977
5978 static int
5979 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
5980 int type)
5981 {
5982 VEC (btrace_block_s) *btrace;
5983 struct btrace_block *block;
5984 enum btrace_error err;
5985 int i;
5986
5987 btrace = NULL;
5988 err = linux_read_btrace (&btrace, tinfo, type);
5989 if (err != BTRACE_ERR_NONE)
5990 {
5991 if (err == BTRACE_ERR_OVERFLOW)
5992 buffer_grow_str0 (buffer, "E.Overflow.");
5993 else
5994 buffer_grow_str0 (buffer, "E.Generic Error.");
5995
5996 return -1;
5997 }
5998
5999 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6000 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6001
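  /* Each trace block becomes one XML element, e.g. (with illustrative
     addresses):  <block begin="0x400500" end="0x400520"/>  */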
6002 for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
6003 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6004 paddress (block->begin), paddress (block->end));
6005
6006 buffer_grow_str0 (buffer, "</btrace>\n");
6007
6008 VEC_free (btrace_block_s, btrace);
6009
6010 return 0;
6011 }
6012 #endif /* HAVE_LINUX_BTRACE */
6013
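/* The Linux target's operations vector.  This initializer is positional:
   each entry must line up with the corresponding member of struct
   target_ops, and a NULL entry marks an operation that is unsupported in
   the current configuration.  */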
6014 static struct target_ops linux_target_ops = {
6015 linux_create_inferior,
6016 linux_attach,
6017 linux_kill,
6018 linux_detach,
6019 linux_mourn,
6020 linux_join,
6021 linux_thread_alive,
6022 linux_resume,
6023 linux_wait,
6024 linux_fetch_registers,
6025 linux_store_registers,
6026 linux_prepare_to_access_memory,
6027 linux_done_accessing_memory,
6028 linux_read_memory,
6029 linux_write_memory,
6030 linux_look_up_symbols,
6031 linux_request_interrupt,
6032 linux_read_auxv,
6033 linux_supports_z_point_type,
6034 linux_insert_point,
6035 linux_remove_point,
6036 linux_stopped_by_watchpoint,
6037 linux_stopped_data_address,
6038 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6039 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6040 && defined(PT_TEXT_END_ADDR)
6041 linux_read_offsets,
6042 #else
6043 NULL,
6044 #endif
6045 #ifdef USE_THREAD_DB
6046 thread_db_get_tls_address,
6047 #else
6048 NULL,
6049 #endif
6050 linux_qxfer_spu,
6051 hostio_last_error_from_errno,
6052 linux_qxfer_osdata,
6053 linux_xfer_siginfo,
6054 linux_supports_non_stop,
6055 linux_async,
6056 linux_start_non_stop,
6057 linux_supports_multi_process,
6058 #ifdef USE_THREAD_DB
6059 thread_db_handle_monitor_command,
6060 #else
6061 NULL,
6062 #endif
6063 linux_common_core_of_thread,
6064 linux_read_loadmap,
6065 linux_process_qsupported,
6066 linux_supports_tracepoints,
6067 linux_read_pc,
6068 linux_write_pc,
6069 linux_thread_stopped,
6070 NULL,
6071 linux_pause_all,
6072 linux_unpause_all,
6073 linux_cancel_breakpoints,
6074 linux_stabilize_threads,
6075 linux_install_fast_tracepoint_jump_pad,
6076 linux_emit_ops,
6077 linux_supports_disable_randomization,
6078 linux_get_min_fast_tracepoint_insn_len,
6079 linux_qxfer_libraries_svr4,
6080 linux_supports_agent,
6081 #ifdef HAVE_LINUX_BTRACE
6082 linux_supports_btrace,
6083 linux_low_enable_btrace,
6084 linux_low_disable_btrace,
6085 linux_low_read_btrace,
6086 #else
6087 NULL,
6088 NULL,
6089 NULL,
6090 NULL,
6091 #endif
6092 linux_supports_range_stepping,
6093 };
6094
6095 static void
6096 linux_init_signals (void)
6097 {
6098 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6099 to find what the cancel signal actually is. */
6100 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6101 signal (__SIGRTMIN+1, SIG_IGN);
6102 #endif
6103 }
6104
6105 #ifdef HAVE_LINUX_REGSETS
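/* Count the regsets in INFO.  The regsets array is expected to end with
   a sentinel entry whose size is negative.  */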
6106 void
6107 initialize_regsets_info (struct regsets_info *info)
6108 {
6109 for (info->num_regsets = 0;
6110 info->regsets[info->num_regsets].size >= 0;
6111 info->num_regsets++)
6112 ;
6113 }
6114 #endif
6115
6116 void
6117 initialize_low (void)
6118 {
6119 struct sigaction sigchld_action;
6120 memset (&sigchld_action, 0, sizeof (sigchld_action));
6121 set_target_ops (&linux_target_ops);
6122 set_breakpoint_data (the_low_target.breakpoint,
6123 the_low_target.breakpoint_len);
6124 linux_init_signals ();
6125 linux_ptrace_init_warnings ();
6126
6127 sigchld_action.sa_handler = sigchld_handler;
6128 sigemptyset (&sigchld_action.sa_mask);
6129 sigchld_action.sa_flags = SA_RESTART;
6130 sigaction (SIGCHLD, &sigchld_action, NULL);
6131
6132 initialize_low_arch ();
6133 }