Make reinsert_breakpoint thread specific
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2016 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #include <inttypes.h>
50 #ifndef ELFMAG0
51 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
52 then ELFMAG0 will have been defined. If it didn't get included by
53 gdb_proc_service.h then including it will likely introduce a duplicate
54 definition of elf_fpregset_t. */
55 #include <elf.h>
56 #endif
57 #include "nat/linux-namespaces.h"
58
59 #ifndef SPUFS_MAGIC
60 #define SPUFS_MAGIC 0x23c9b64e
61 #endif
62
63 #ifdef HAVE_PERSONALITY
64 # include <sys/personality.h>
65 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
66 # define ADDR_NO_RANDOMIZE 0x0040000
67 # endif
68 #endif
69
70 #ifndef O_LARGEFILE
71 #define O_LARGEFILE 0
72 #endif
73
74 /* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77 #if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80 #if defined(__mcoldfire__)
81 /* These are still undefined in 3.10 kernels. */
82 #define PT_TEXT_ADDR 49*4
83 #define PT_DATA_ADDR 50*4
84 #define PT_TEXT_END_ADDR 51*4
85 /* BFIN has defined these since at least the 2.6.32 kernels. */
86 #elif defined(BFIN)
87 #define PT_TEXT_ADDR 220
88 #define PT_TEXT_END_ADDR 224
89 #define PT_DATA_ADDR 228
90 /* These are still undefined in 3.10 kernels. */
91 #elif defined(__TMS320C6X__)
92 #define PT_TEXT_ADDR (0x10000*4)
93 #define PT_DATA_ADDR (0x10004*4)
94 #define PT_TEXT_END_ADDR (0x10008*4)
95 #endif
96 #endif
97
98 #ifdef HAVE_LINUX_BTRACE
99 # include "nat/linux-btrace.h"
100 # include "btrace-common.h"
101 #endif
102
103 #ifndef HAVE_ELF32_AUXV_T
104 /* Copied from glibc's elf.h. */
105 typedef struct
106 {
107 uint32_t a_type; /* Entry type */
108 union
109 {
110 uint32_t a_val; /* Integer value */
111 /* We used to have pointer elements added here. We cannot do that,
112 though, since it does not work when using 32-bit definitions
113 on 64-bit platforms and vice versa. */
114 } a_un;
115 } Elf32_auxv_t;
116 #endif
117
118 #ifndef HAVE_ELF64_AUXV_T
119 /* Copied from glibc's elf.h. */
120 typedef struct
121 {
122 uint64_t a_type; /* Entry type */
123 union
124 {
125 uint64_t a_val; /* Integer value */
126 /* We used to have pointer elements added here. We cannot do that,
127 though, since it does not work when using 32-bit definitions
128 on 64-bit platforms and vice versa. */
129 } a_un;
130 } Elf64_auxv_t;
131 #endif
132
133 /* Does the current host support PTRACE_GETREGSET? */
134 int have_ptrace_getregset = -1;
135
136 /* LWP accessors. */
137
138 /* See nat/linux-nat.h. */
139
140 ptid_t
141 ptid_of_lwp (struct lwp_info *lwp)
142 {
143 return ptid_of (get_lwp_thread (lwp));
144 }
145
146 /* See nat/linux-nat.h. */
147
148 void
149 lwp_set_arch_private_info (struct lwp_info *lwp,
150 struct arch_lwp_info *info)
151 {
152 lwp->arch_private = info;
153 }
154
155 /* See nat/linux-nat.h. */
156
157 struct arch_lwp_info *
158 lwp_arch_private_info (struct lwp_info *lwp)
159 {
160 return lwp->arch_private;
161 }
162
163 /* See nat/linux-nat.h. */
164
165 int
166 lwp_is_stopped (struct lwp_info *lwp)
167 {
168 return lwp->stopped;
169 }
170
171 /* See nat/linux-nat.h. */
172
173 enum target_stop_reason
174 lwp_stop_reason (struct lwp_info *lwp)
175 {
176 return lwp->stop_reason;
177 }
178
179 /* A list of all unknown processes which receive stop signals. Some
180 other process will presumably claim each of these as forked
181 children momentarily. */
182
183 struct simple_pid_list
184 {
185 /* The process ID. */
186 int pid;
187
188 /* The status as reported by waitpid. */
189 int status;
190
191 /* Next in chain. */
192 struct simple_pid_list *next;
193 };
194 struct simple_pid_list *stopped_pids;
195
196 /* Trivial list manipulation functions to keep track of a list of new
197 stopped processes. */
198
199 static void
200 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
201 {
202 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
203
204 new_pid->pid = pid;
205 new_pid->status = status;
206 new_pid->next = *listp;
207 *listp = new_pid;
208 }
209
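/* Remove the entry for PID from *LISTP, storing its waitpid status
   in *STATUSP. Return 1 if PID was found, 0 otherwise. */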
210 static int
211 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
212 {
213 struct simple_pid_list **p;
214
215 for (p = listp; *p != NULL; p = &(*p)->next)
216 if ((*p)->pid == pid)
217 {
218 struct simple_pid_list *next = (*p)->next;
219
220 *statusp = (*p)->status;
221 xfree (*p);
222 *p = next;
223 return 1;
224 }
225 return 0;
226 }
227
228 enum stopping_threads_kind
229 {
230 /* Not stopping threads presently. */
231 NOT_STOPPING_THREADS,
232
233 /* Stopping threads. */
234 STOPPING_THREADS,
235
236 /* Stopping and suspending threads. */
237 STOPPING_AND_SUSPENDING_THREADS
238 };
239
240 /* This is set while stop_all_lwps is in effect. */
241 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
242
243 /* FIXME make into a target method? */
244 int using_threads = 1;
245
246 /* True if we're presently stabilizing threads (moving them out of
247 jump pads). */
248 static int stabilizing_threads;
249
250 static void linux_resume_one_lwp (struct lwp_info *lwp,
251 int step, int signal, siginfo_t *info);
252 static void linux_resume (struct thread_resume *resume_info, size_t n);
253 static void stop_all_lwps (int suspend, struct lwp_info *except);
254 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
255 static void unsuspend_all_lwps (struct lwp_info *except);
256 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
257 int *wstat, int options);
258 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
259 static struct lwp_info *add_lwp (ptid_t ptid);
260 static void linux_mourn (struct process_info *process);
261 static int linux_stopped_by_watchpoint (void);
262 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
263 static int lwp_is_marked_dead (struct lwp_info *lwp);
264 static void proceed_all_lwps (void);
265 static int finish_step_over (struct lwp_info *lwp);
266 static int kill_lwp (unsigned long lwpid, int signo);
267 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
268 static void complete_ongoing_step_over (void);
269 static int linux_low_ptrace_options (int attached);
270 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
271
272 /* When the event-loop is doing a step-over, this points at the thread
273 being stepped. */
274 ptid_t step_over_bkpt;
275
276 /* True if the low target can hardware single-step. */
277
278 static int
279 can_hardware_single_step (void)
280 {
281 if (the_low_target.supports_hardware_single_step != NULL)
282 return the_low_target.supports_hardware_single_step ();
283 else
284 return 0;
285 }
286
287 /* True if the low target can software single-step. Such targets
288 implement the GET_NEXT_PCS callback. */
289
290 static int
291 can_software_single_step (void)
292 {
293 return (the_low_target.get_next_pcs != NULL);
294 }
295
296 /* True if the low target supports memory breakpoints. If so, we'll
297 have a GET_PC implementation. */
298
299 static int
300 supports_breakpoints (void)
301 {
302 return (the_low_target.get_pc != NULL);
303 }
304
305 /* Returns true if this target can support fast tracepoints. This
306 does not mean that the in-process agent has been loaded in the
307 inferior. */
308
309 static int
310 supports_fast_tracepoints (void)
311 {
312 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
313 }
314
315 /* True if LWP is stopped in its stepping range. */
316
317 static int
318 lwp_in_step_range (struct lwp_info *lwp)
319 {
320 CORE_ADDR pc = lwp->stop_pc;
321
322 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
323 }
324
325 struct pending_signals
326 {
327 int signal;
328 siginfo_t info;
329 struct pending_signals *prev;
330 };
331
332 /* The read/write ends of the pipe registered as waitable file in the
333 event loop. */
334 static int linux_event_pipe[2] = { -1, -1 };
335
336 /* True if we're currently in async mode. */
337 #define target_is_async_p() (linux_event_pipe[0] != -1)
338
339 static void send_sigstop (struct lwp_info *lwp);
340 static void wait_for_sigstop (void);
341
342 /* Return 1 if HEADER is a 64-bit ELF file, 0 if 32-bit, -1 if not ELF. */
343
344 static int
345 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
346 {
347 if (header->e_ident[EI_MAG0] == ELFMAG0
348 && header->e_ident[EI_MAG1] == ELFMAG1
349 && header->e_ident[EI_MAG2] == ELFMAG2
350 && header->e_ident[EI_MAG3] == ELFMAG3)
351 {
352 *machine = header->e_machine;
353 return header->e_ident[EI_CLASS] == ELFCLASS64;
354
355 }
356 *machine = EM_NONE;
357 return -1;
358 }
359
360 /* Return 1 if FILE is a 64-bit ELF file,
361    zero if the file is a 32-bit ELF file or can't be fully read,
362    and -1 if the file is not accessible, doesn't exist, or isn't ELF. */
363
364 static int
365 elf_64_file_p (const char *file, unsigned int *machine)
366 {
367 Elf64_Ehdr header;
368 int fd;
369
370 fd = open (file, O_RDONLY);
371 if (fd < 0)
372 return -1;
373
374 if (read (fd, &header, sizeof (header)) != sizeof (header))
375 {
376 close (fd);
377 return 0;
378 }
379 close (fd);
380
381 return elf_64_header_p (&header, machine);
382 }
383
384 /* Accepts an integer PID; returns true if the executable that PID is
385    running is a 64-bit ELF file. */
386
387 int
388 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
389 {
390 char file[PATH_MAX];
391
392 sprintf (file, "/proc/%d/exe", pid);
393 return elf_64_file_p (file, machine);
394 }
395
396 static void
397 delete_lwp (struct lwp_info *lwp)
398 {
399 struct thread_info *thr = get_lwp_thread (lwp);
400
401 if (debug_threads)
402 debug_printf ("deleting %ld\n", lwpid_of (thr));
403
404 remove_thread (thr);
405 free (lwp->arch_private);
406 free (lwp);
407 }
408
409 /* Add a process to the common process list, and set its private
410 data. */
411
412 static struct process_info *
413 linux_add_process (int pid, int attached)
414 {
415 struct process_info *proc;
416
417 proc = add_process (pid, attached);
418 proc->priv = XCNEW (struct process_info_private);
419
420 if (the_low_target.new_process != NULL)
421 proc->priv->arch_private = the_low_target.new_process ();
422
423 return proc;
424 }
425
426 static CORE_ADDR get_pc (struct lwp_info *lwp);
427
428 /* Call the target arch_setup function on the current thread. */
429
430 static void
431 linux_arch_setup (void)
432 {
433 the_low_target.arch_setup ();
434 }
435
436 /* Call the target arch_setup function on THREAD. */
437
438 static void
439 linux_arch_setup_thread (struct thread_info *thread)
440 {
441 struct thread_info *saved_thread;
442
443 saved_thread = current_thread;
444 current_thread = thread;
445
446 linux_arch_setup ();
447
448 current_thread = saved_thread;
449 }
450
451 /* Handle a GNU/Linux extended wait response. If we see a clone
452    event, we add the new LWP to our list and return 1 so the trap is
453    not reported to higher layers; fork/vfork events add the LWP but
454    are reported (return 0). If we see an exec event, we modify
455    ORIG_EVENT_LWP to point to a new LWP representing the new program. */
456
457 static int
458 handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
459 {
460 struct lwp_info *event_lwp = *orig_event_lwp;
461 int event = linux_ptrace_get_extended_event (wstat);
462 struct thread_info *event_thr = get_lwp_thread (event_lwp);
463 struct lwp_info *new_lwp;
464
465 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
466
467 /* All extended events we currently use are mid-syscall. Only
468 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
469 you have to be using PTRACE_SEIZE to get that. */
470 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
471
472 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
473 || (event == PTRACE_EVENT_CLONE))
474 {
475 ptid_t ptid;
476 unsigned long new_pid;
477 int ret, status;
478
479 /* Get the pid of the new lwp. */
480 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
481 &new_pid);
482
483 /* If we haven't already seen the new PID stop, wait for it now. */
484 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
485 {
486 /* The new child has a pending SIGSTOP. We can't affect it until it
487 hits the SIGSTOP, but we're already attached. */
488
489 ret = my_waitpid (new_pid, &status, __WALL);
490
491 if (ret == -1)
492 perror_with_name ("waiting for new child");
493 else if (ret != new_pid)
494 warning ("wait returned unexpected PID %d", ret);
495 else if (!WIFSTOPPED (status))
496 warning ("wait returned unexpected status 0x%x", status);
497 }
498
499 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
500 {
501 struct process_info *parent_proc;
502 struct process_info *child_proc;
503 struct lwp_info *child_lwp;
504 struct thread_info *child_thr;
505 struct target_desc *tdesc;
506
507 ptid = ptid_build (new_pid, new_pid, 0);
508
509 if (debug_threads)
510 {
511 debug_printf ("HEW: Got fork event from LWP %ld, "
512 "new child is %d\n",
513 ptid_get_lwp (ptid_of (event_thr)),
514 ptid_get_pid (ptid));
515 }
516
517 /* Add the new process to the tables and clone the breakpoint
518 lists of the parent. We need to do this even if the new process
519 will be detached, since we will need the process object and the
520 breakpoints to remove any breakpoints from memory when we
521 detach, and the client side will access registers. */
522 child_proc = linux_add_process (new_pid, 0);
523 gdb_assert (child_proc != NULL);
524 child_lwp = add_lwp (ptid);
525 gdb_assert (child_lwp != NULL);
526 child_lwp->stopped = 1;
527 child_lwp->must_set_ptrace_flags = 1;
528 child_lwp->status_pending_p = 0;
529 child_thr = get_lwp_thread (child_lwp);
530 child_thr->last_resume_kind = resume_stop;
531 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
532
533 /* If we're suspending all threads, leave this one suspended
534 too. If the fork/clone parent is stepping over a breakpoint,
535 all other threads have been suspended already. Leave the
536 child suspended too. */
537 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
538 || event_lwp->bp_reinsert != 0)
539 {
540 if (debug_threads)
541 debug_printf ("HEW: leaving child suspended\n");
542 child_lwp->suspended = 1;
543 }
544
545 parent_proc = get_thread_process (event_thr);
546 child_proc->attached = parent_proc->attached;
547
548 if (event_lwp->bp_reinsert != 0
549 && can_software_single_step ()
550 && event == PTRACE_EVENT_VFORK)
551 {
552 /* If we leave the reinsert breakpoints there, the child will
553    hit them, so uninsert the reinsert breakpoints from the parent
554    (and thus from the child, which shares its memory). Once the
555    vfork child is done, reinsert them in the parent. */
556 uninsert_reinsert_breakpoints (event_thr);
557 }
558
559 clone_all_breakpoints (child_thr, event_thr);
560
561 tdesc = XNEW (struct target_desc);
562 copy_target_description (tdesc, parent_proc->tdesc);
563 child_proc->tdesc = tdesc;
564
565 /* Clone arch-specific process data. */
566 if (the_low_target.new_fork != NULL)
567 the_low_target.new_fork (parent_proc, child_proc);
568
569 /* Save fork info in the parent thread. */
570 if (event == PTRACE_EVENT_FORK)
571 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
572 else if (event == PTRACE_EVENT_VFORK)
573 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
574
575 event_lwp->waitstatus.value.related_pid = ptid;
576
577 /* The status_pending field contains bits denoting the
578 extended event, so when the pending event is handled,
579 the handler will look at lwp->waitstatus. */
580 event_lwp->status_pending_p = 1;
581 event_lwp->status_pending = wstat;
582
583 /* If the parent thread is doing a step-over with reinsert
584    breakpoints, the list of reinsert breakpoints is cloned
585    from the parent's. Remove them from the child process.
586    In the vfork case, we'll reinsert them once the vforked
587    child is done. */
588 if (event_lwp->bp_reinsert != 0
589 && can_software_single_step ())
590 {
591 /* The child process is forked and stopped, so it is safe
592 to access its memory without stopping all other threads
593 from other processes. */
594 delete_reinsert_breakpoints (child_thr);
595
596 gdb_assert (has_reinsert_breakpoints (event_thr));
597 gdb_assert (!has_reinsert_breakpoints (child_thr));
598 }
599
600 /* Report the event. */
601 return 0;
602 }
603
604 if (debug_threads)
605 debug_printf ("HEW: Got clone event "
606 "from LWP %ld, new child is LWP %ld\n",
607 lwpid_of (event_thr), new_pid);
608
609 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
610 new_lwp = add_lwp (ptid);
611
612 /* Either we're going to immediately resume the new thread
613 or leave it stopped. linux_resume_one_lwp is a nop if it
614 thinks the thread is currently running, so set this first
615 before calling linux_resume_one_lwp. */
616 new_lwp->stopped = 1;
617
618 /* If we're suspending all threads, leave this one suspended
619 too. If the fork/clone parent is stepping over a breakpoint,
620 all other threads have been suspended already. Leave the
621 child suspended too. */
622 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
623 || event_lwp->bp_reinsert != 0)
624 new_lwp->suspended = 1;
625
626 /* Normally we will get the pending SIGSTOP. But in some cases
627 we might get another signal delivered to the group first.
628 If we do get another signal, be sure not to lose it. */
629 if (WSTOPSIG (status) != SIGSTOP)
630 {
631 new_lwp->stop_expected = 1;
632 new_lwp->status_pending_p = 1;
633 new_lwp->status_pending = status;
634 }
635 else if (report_thread_events)
636 {
637 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
638 new_lwp->status_pending_p = 1;
639 new_lwp->status_pending = status;
640 }
641
642 /* Don't report the event. */
643 return 1;
644 }
645 else if (event == PTRACE_EVENT_VFORK_DONE)
646 {
647 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
648
649 if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
650 {
651 reinsert_reinsert_breakpoints (event_thr);
652
653 gdb_assert (has_reinsert_breakpoints (event_thr));
654 }
655
656 /* Report the event. */
657 return 0;
658 }
659 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
660 {
661 struct process_info *proc;
662 VEC (int) *syscalls_to_catch;
663 ptid_t event_ptid;
664 pid_t event_pid;
665
666 if (debug_threads)
667 {
668 debug_printf ("HEW: Got exec event from LWP %ld\n",
669 lwpid_of (event_thr));
670 }
671
672 /* Get the event ptid. */
673 event_ptid = ptid_of (event_thr);
674 event_pid = ptid_get_pid (event_ptid);
675
676 /* Save the syscall list from the execing process. */
677 proc = get_thread_process (event_thr);
678 syscalls_to_catch = proc->syscalls_to_catch;
679 proc->syscalls_to_catch = NULL;
680
681 /* Delete the execing process and all its threads. */
682 linux_mourn (proc);
683 current_thread = NULL;
684
685 /* Create a new process/lwp/thread. */
686 proc = linux_add_process (event_pid, 0);
687 event_lwp = add_lwp (event_ptid);
688 event_thr = get_lwp_thread (event_lwp);
689 gdb_assert (current_thread == event_thr);
690 linux_arch_setup_thread (event_thr);
691
692 /* Set the event status. */
693 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
694 event_lwp->waitstatus.value.execd_pathname
695 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
696
697 /* Mark the exec status as pending. */
698 event_lwp->stopped = 1;
699 event_lwp->status_pending_p = 1;
700 event_lwp->status_pending = wstat;
701 event_thr->last_resume_kind = resume_continue;
702 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
703
704 /* Update syscall state in the new lwp, effectively mid-syscall too. */
705 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
706
707 /* Restore the list to catch. Don't rely on the client, which is free
708 to avoid sending a new list when the architecture doesn't change.
709 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
710 proc->syscalls_to_catch = syscalls_to_catch;
711
712 /* Report the event. */
713 *orig_event_lwp = event_lwp;
714 return 0;
715 }
716
717 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
718 }
719
720 /* Return the PC as read from the regcache of LWP, without any
721 adjustment. */
722
723 static CORE_ADDR
724 get_pc (struct lwp_info *lwp)
725 {
726 struct thread_info *saved_thread;
727 struct regcache *regcache;
728 CORE_ADDR pc;
729
730 if (the_low_target.get_pc == NULL)
731 return 0;
732
733 saved_thread = current_thread;
734 current_thread = get_lwp_thread (lwp);
735
736 regcache = get_thread_regcache (current_thread, 1);
737 pc = (*the_low_target.get_pc) (regcache);
738
739 if (debug_threads)
740 debug_printf ("pc is 0x%lx\n", (long) pc);
741
742 current_thread = saved_thread;
743 return pc;
744 }
745
746 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
747 Fill *SYSNO with the number of the syscall that trapped. */
748
749 static void
750 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
751 {
752 struct thread_info *saved_thread;
753 struct regcache *regcache;
754
755 if (the_low_target.get_syscall_trapinfo == NULL)
756 {
757 /* If we cannot get the syscall trapinfo, report an unknown
758 system call number. */
759 *sysno = UNKNOWN_SYSCALL;
760 return;
761 }
762
763 saved_thread = current_thread;
764 current_thread = get_lwp_thread (lwp);
765
766 regcache = get_thread_regcache (current_thread, 1);
767 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
768
769 if (debug_threads)
770 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
771
772 current_thread = saved_thread;
773 }
774
775 static int check_stopped_by_watchpoint (struct lwp_info *child);
776
777 /* Called when the LWP stopped for a signal/trap. If it stopped for a
778 trap check what caused it (breakpoint, watchpoint, trace, etc.),
779 and save the result in the LWP's stop_reason field. If it stopped
780 for a breakpoint, decrement the PC if necessary on the lwp's
781 architecture. Returns true if we now have the LWP's stop PC. */
782
783 static int
784 save_stop_reason (struct lwp_info *lwp)
785 {
786 CORE_ADDR pc;
787 CORE_ADDR sw_breakpoint_pc;
788 struct thread_info *saved_thread;
789 #if USE_SIGTRAP_SIGINFO
790 siginfo_t siginfo;
791 #endif
792
793 if (the_low_target.get_pc == NULL)
794 return 0;
795
796 pc = get_pc (lwp);
797 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
798
799 /* breakpoint_at reads from the current thread. */
800 saved_thread = current_thread;
801 current_thread = get_lwp_thread (lwp);
802
803 #if USE_SIGTRAP_SIGINFO
804 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
805 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
806 {
807 if (siginfo.si_signo == SIGTRAP)
808 {
809 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
810 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
811 {
812 /* The si_code is ambiguous on this arch -- check debug
813 registers. */
814 if (!check_stopped_by_watchpoint (lwp))
815 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
816 }
817 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
818 {
819 /* If we determine the LWP stopped for a SW breakpoint,
820 trust it. Particularly don't check watchpoint
821 registers, because at least on s390, we'd find
822 stopped-by-watchpoint as long as there's a watchpoint
823 set. */
824 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
825 }
826 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
827 {
828 /* This can indicate either a hardware breakpoint or
829 hardware watchpoint. Check debug registers. */
830 if (!check_stopped_by_watchpoint (lwp))
831 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
832 }
833 else if (siginfo.si_code == TRAP_TRACE)
834 {
835 /* We may have single stepped an instruction that
836 triggered a watchpoint. In that case, on some
837 architectures (such as x86), instead of TRAP_HWBKPT,
838 si_code indicates TRAP_TRACE, and we need to check
839 the debug registers separately. */
840 if (!check_stopped_by_watchpoint (lwp))
841 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
842 }
843 }
844 }
845 #else
846 /* We may have just stepped a breakpoint instruction. E.g., in
847 non-stop mode, GDB first tells thread A to step a range, and
848 then the user inserts a breakpoint inside the range. In that
849 case we need to report the breakpoint PC. */
850 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
851 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
852 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
853
854 if (hardware_breakpoint_inserted_here (pc))
855 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
856
857 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
858 check_stopped_by_watchpoint (lwp);
859 #endif
860
861 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
862 {
863 if (debug_threads)
864 {
865 struct thread_info *thr = get_lwp_thread (lwp);
866
867 debug_printf ("CSBB: %s stopped by software breakpoint\n",
868 target_pid_to_str (ptid_of (thr)));
869 }
870
871 /* Back up the PC if necessary. */
872 if (pc != sw_breakpoint_pc)
873 {
874 struct regcache *regcache
875 = get_thread_regcache (current_thread, 1);
876 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
877 }
878
879 /* Update this so we record the correct stop PC below. */
880 pc = sw_breakpoint_pc;
881 }
882 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
883 {
884 if (debug_threads)
885 {
886 struct thread_info *thr = get_lwp_thread (lwp);
887
888 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
889 target_pid_to_str (ptid_of (thr)));
890 }
891 }
892 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
893 {
894 if (debug_threads)
895 {
896 struct thread_info *thr = get_lwp_thread (lwp);
897
898 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
899 target_pid_to_str (ptid_of (thr)));
900 }
901 }
902 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
903 {
904 if (debug_threads)
905 {
906 struct thread_info *thr = get_lwp_thread (lwp);
907
908 debug_printf ("CSBB: %s stopped by trace\n",
909 target_pid_to_str (ptid_of (thr)));
910 }
911 }
912
913 lwp->stop_pc = pc;
914 current_thread = saved_thread;
915 return 1;
916 }
917
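/* Allocate a new lwp_info for PTID, let the low target attach its
   per-thread data, and register the LWP with a new thread entry. */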
918 static struct lwp_info *
919 add_lwp (ptid_t ptid)
920 {
921 struct lwp_info *lwp;
922
923 lwp = XCNEW (struct lwp_info);
924
925 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
926
927 if (the_low_target.new_thread != NULL)
928 the_low_target.new_thread (lwp);
929
930 lwp->thread = add_thread (ptid, lwp);
931
932 return lwp;
933 }
934
935 /* Start an inferior process and return its pid.
936 ALLARGS is a vector of program-name and args. */
937
938 static int
939 linux_create_inferior (char *program, char **allargs)
940 {
941 struct lwp_info *new_lwp;
942 int pid;
943 ptid_t ptid;
944 struct cleanup *restore_personality
945 = maybe_disable_address_space_randomization (disable_randomization);
946
947 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
948 pid = vfork ();
949 #else
950 pid = fork ();
951 #endif
952 if (pid < 0)
953 perror_with_name ("fork");
954
955 if (pid == 0)
956 {
957 close_most_fds ();
958 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
959
960 setpgid (0, 0);
961
962 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
963 stdout to stderr so that inferior i/o doesn't corrupt the connection.
964 Also, redirect stdin to /dev/null. */
965 if (remote_connection_is_stdio ())
966 {
967 close (0);
968 open ("/dev/null", O_RDONLY);
969 dup2 (2, 1);
970 if (write (2, "stdin/stdout redirected\n",
971 sizeof ("stdin/stdout redirected\n") - 1) < 0)
972 {
973 /* Errors ignored. */;
974 }
975 }
976
977 execv (program, allargs);
978 if (errno == ENOENT)
979 execvp (program, allargs);
980
981 fprintf (stderr, "Cannot exec %s: %s.\n", program,
982 strerror (errno));
983 fflush (stderr);
984 _exit (0177);
985 }
986
987 do_cleanups (restore_personality);
988
989 linux_add_process (pid, 0);
990
991 ptid = ptid_build (pid, pid, 0);
992 new_lwp = add_lwp (ptid);
993 new_lwp->must_set_ptrace_flags = 1;
994
995 return pid;
996 }
997
998 /* Implement the post_create_inferior target_ops method. */
999
1000 static void
1001 linux_post_create_inferior (void)
1002 {
1003 struct lwp_info *lwp = get_thread_lwp (current_thread);
1004
1005 linux_arch_setup ();
1006
1007 if (lwp->must_set_ptrace_flags)
1008 {
1009 struct process_info *proc = current_process ();
1010 int options = linux_low_ptrace_options (proc->attached);
1011
1012 linux_enable_event_reporting (lwpid_of (current_thread), options);
1013 lwp->must_set_ptrace_flags = 0;
1014 }
1015 }
1016
1017 /* Attach to an inferior process. Returns 0 on success, ERRNO on
1018 error. */
1019
1020 int
1021 linux_attach_lwp (ptid_t ptid)
1022 {
1023 struct lwp_info *new_lwp;
1024 int lwpid = ptid_get_lwp (ptid);
1025
1026 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1027 != 0)
1028 return errno;
1029
1030 new_lwp = add_lwp (ptid);
1031
1032 /* We need to wait for SIGSTOP before being able to make the next
1033 ptrace call on this LWP. */
1034 new_lwp->must_set_ptrace_flags = 1;
1035
1036 if (linux_proc_pid_is_stopped (lwpid))
1037 {
1038 if (debug_threads)
1039 debug_printf ("Attached to a stopped process\n");
1040
1041 /* The process is definitely stopped. It is in a job control
1042 stop, unless the kernel predates the TASK_STOPPED /
1043 TASK_TRACED distinction, in which case it might be in a
1044 ptrace stop. Make sure it is in a ptrace stop; from there we
1045 can kill it, signal it, et cetera.
1046
1047 First make sure there is a pending SIGSTOP. Since we are
1048 already attached, the process can not transition from stopped
1049 to running without a PTRACE_CONT; so we know this signal will
1050 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1051 probably already in the queue (unless this kernel is old
1052 enough to use TASK_STOPPED for ptrace stops); but since
1053 SIGSTOP is not an RT signal, it can only be queued once. */
1054 kill_lwp (lwpid, SIGSTOP);
1055
1056 /* Finally, resume the stopped process. This will deliver the
1057 SIGSTOP (or a higher priority signal, just like normal
1058 PTRACE_ATTACH), which we'll catch later on. */
1059 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1060 }
1061
1062 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1063 brings it to a halt.
1064
1065 There are several cases to consider here:
1066
1067 1) gdbserver has already attached to the process and is being notified
1068 of a new thread that is being created.
1069 In this case we should ignore that SIGSTOP and resume the
1070 process. This is handled below by setting stop_expected = 1,
1071 and the fact that add_thread sets last_resume_kind ==
1072 resume_continue.
1073
1074 2) This is the first thread (the process thread), and we're attaching
1075 to it via attach_inferior.
1076 In this case we want the process thread to stop.
1077 This is handled by having linux_attach set last_resume_kind ==
1078 resume_stop after we return.
1079
1080 If the pid we are attaching to is also the tgid, we attach to and
1081 stop all the existing threads. Otherwise, we attach to pid and
1082 ignore any other threads in the same group as this pid.
1083
1084 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1085 existing threads.
1086 In this case we want the thread to stop.
1087 FIXME: This case is currently not properly handled.
1088 We should wait for the SIGSTOP but don't. Things work apparently
1089 because enough time passes between when we ptrace (ATTACH) and when
1090 gdb makes the next ptrace call on the thread.
1091
1092 On the other hand, if we are currently trying to stop all threads, we
1093 should treat the new thread as if we had sent it a SIGSTOP. This works
1094 because we are guaranteed that the add_lwp call above added us to the
1095 end of the list, and so the new thread has not yet reached
1096 wait_for_sigstop (but will). */
1097 new_lwp->stop_expected = 1;
1098
1099 return 0;
1100 }
1101
1102 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1103 already attached. Returns true if a new LWP is found, false
1104 otherwise. */
1105
1106 static int
1107 attach_proc_task_lwp_callback (ptid_t ptid)
1108 {
1109 /* Is this a new thread? */
1110 if (find_thread_ptid (ptid) == NULL)
1111 {
1112 int lwpid = ptid_get_lwp (ptid);
1113 int err;
1114
1115 if (debug_threads)
1116 debug_printf ("Found new lwp %d\n", lwpid);
1117
1118 err = linux_attach_lwp (ptid);
1119
1120 /* Be quiet if we simply raced with the thread exiting. EPERM
1121 is returned if the thread's task still exists, and is marked
1122 as exited or zombie, as well as other conditions, so in that
1123 case, confirm the status in /proc/PID/status. */
1124 if (err == ESRCH
1125 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1126 {
1127 if (debug_threads)
1128 {
1129 debug_printf ("Cannot attach to lwp %d: "
1130 "thread is gone (%d: %s)\n",
1131 lwpid, err, strerror (err));
1132 }
1133 }
1134 else if (err != 0)
1135 {
1136 warning (_("Cannot attach to lwp %d: %s"),
1137 lwpid,
1138 linux_ptrace_attach_fail_reason_string (ptid, err));
1139 }
1140
1141 return 1;
1142 }
1143 return 0;
1144 }
1145
1146 static void async_file_mark (void);
1147
1148 /* Attach to PID. If PID is the tgid, attach to it and all
1149 of its threads. */
1150
1151 static int
1152 linux_attach (unsigned long pid)
1153 {
1154 struct process_info *proc;
1155 struct thread_info *initial_thread;
1156 ptid_t ptid = ptid_build (pid, pid, 0);
1157 int err;
1158
1159 /* Attach to PID. We will check for other threads
1160 soon. */
1161 err = linux_attach_lwp (ptid);
1162 if (err != 0)
1163 error ("Cannot attach to process %ld: %s",
1164 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1165
1166 proc = linux_add_process (pid, 1);
1167
1168 /* Don't ignore the initial SIGSTOP if we just attached to this
1169 process. It will be collected by wait shortly. */
1170 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1171 initial_thread->last_resume_kind = resume_stop;
1172
1173 /* We must attach to every LWP. If /proc is mounted, use that to
1174 find them now. On the one hand, the inferior may be using raw
1175 clone instead of using pthreads. On the other hand, even if it
1176 is using pthreads, GDB may not be connected yet (thread_db needs
1177 to do symbol lookups, through qSymbol). Also, thread_db walks
1178 structures in the inferior's address space to find the list of
1179 threads/LWPs, and those structures may well be corrupted. Note
1180 that once thread_db is loaded, we'll still use it to list threads
1181 and associate pthread info with each LWP. */
1182 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1183
1184 /* GDB will shortly read the xml target description for this
1185 process, to figure out the process' architecture. But the target
1186 description is only filled in when the first process/thread in
1187 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1188 that now, otherwise, if GDB is fast enough, it could read the
1189 target description _before_ that initial stop. */
1190 if (non_stop)
1191 {
1192 struct lwp_info *lwp;
1193 int wstat, lwpid;
1194 ptid_t pid_ptid = pid_to_ptid (pid);
1195
1196 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1197 &wstat, __WALL);
1198 gdb_assert (lwpid > 0);
1199
1200 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1201
1202 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1203 {
1204 lwp->status_pending_p = 1;
1205 lwp->status_pending = wstat;
1206 }
1207
1208 initial_thread->last_resume_kind = resume_continue;
1209
1210 async_file_mark ();
1211
1212 gdb_assert (proc->tdesc != NULL);
1213 }
1214
1215 return 0;
1216 }
1217
1218 struct counter
1219 {
1220 int pid;
1221 int count;
1222 };
1223
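/* Callback for find_inferior. Counts the threads of the pid given in
   ARGS, returning 1 (and thus stopping the search) as soon as a
   second one is seen. */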
1224 static int
1225 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1226 {
1227 struct counter *counter = (struct counter *) args;
1228
1229 if (ptid_get_pid (entry->id) == counter->pid)
1230 {
1231 if (++counter->count > 1)
1232 return 1;
1233 }
1234
1235 return 0;
1236 }
1237
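/* Return nonzero if at most one thread of process PID is known,
   i.e., if no second thread of PID can be found. */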
1238 static int
1239 last_thread_of_process_p (int pid)
1240 {
1241 struct counter counter = { pid , 0 };
1242
1243 return (find_inferior (&all_threads,
1244 second_thread_of_pid_p, &counter) == NULL);
1245 }
1246
1247 /* Kill LWP. */
1248
1249 static void
1250 linux_kill_one_lwp (struct lwp_info *lwp)
1251 {
1252 struct thread_info *thr = get_lwp_thread (lwp);
1253 int pid = lwpid_of (thr);
1254
1255 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1256 there is no signal context, and ptrace(PTRACE_KILL) (or
1257 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1258 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1259 alternative is to kill with SIGKILL. We only need one SIGKILL
1260 per process, not one for each thread. But since we still
1261 support debugging programs using raw clone without CLONE_THREAD,
1262 we send one for each thread. For years, we used PTRACE_KILL
1263 only, so we're being a bit paranoid about some old kernels where
1264 PTRACE_KILL might work better (dubious if there are any such, but
1265 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1266 second, and so we're fine everywhere. */
1267
1268 errno = 0;
1269 kill_lwp (pid, SIGKILL);
1270 if (debug_threads)
1271 {
1272 int save_errno = errno;
1273
1274 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1275 target_pid_to_str (ptid_of (thr)),
1276 save_errno ? strerror (save_errno) : "OK");
1277 }
1278
1279 errno = 0;
1280 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1281 if (debug_threads)
1282 {
1283 int save_errno = errno;
1284
1285 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1286 target_pid_to_str (ptid_of (thr)),
1287 save_errno ? strerror (save_errno) : "OK");
1288 }
1289 }
1290
1291 /* Kill LWP and wait for it to die. */
1292
1293 static void
1294 kill_wait_lwp (struct lwp_info *lwp)
1295 {
1296 struct thread_info *thr = get_lwp_thread (lwp);
1297 int pid = ptid_get_pid (ptid_of (thr));
1298 int lwpid = ptid_get_lwp (ptid_of (thr));
1299 int wstat;
1300 int res;
1301
1302 if (debug_threads)
1303 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1304
1305 do
1306 {
1307 linux_kill_one_lwp (lwp);
1308
1309 /* Make sure it died. Notes:
1310
1311 - The loop is most likely unnecessary.
1312
1313 - We don't use linux_wait_for_event as that could delete lwps
1314 while we're iterating over them. We're not interested in
1315 any pending status at this point, only in making sure all
1316 wait status on the kernel side are collected until the
1317 process is reaped.
1318
1319 - We don't use __WALL here as the __WALL emulation relies on
1320 SIGCHLD, and killing a stopped process doesn't generate
1321 one, nor an exit status.
1322 */
1323 res = my_waitpid (lwpid, &wstat, 0);
1324 if (res == -1 && errno == ECHILD)
1325 res = my_waitpid (lwpid, &wstat, __WCLONE);
1326 } while (res > 0 && WIFSTOPPED (wstat));
1327
1328 /* Even if it was stopped, the child may have already disappeared.
1329 E.g., if it was killed by SIGKILL. */
1330 if (res < 0 && errno != ECHILD)
1331 perror_with_name ("kill_wait_lwp");
1332 }
1333
1334 /* Callback for `find_inferior'. Kills an lwp of a given process,
1335 except the leader. */
1336
1337 static int
1338 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1339 {
1340 struct thread_info *thread = (struct thread_info *) entry;
1341 struct lwp_info *lwp = get_thread_lwp (thread);
1342 int pid = * (int *) args;
1343
1344 if (ptid_get_pid (entry->id) != pid)
1345 return 0;
1346
1347 /* We avoid killing the first thread here, because of a Linux kernel (at
1348 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1349 the children get a chance to be reaped, it will remain a zombie
1350 forever. */
1351
1352 if (lwpid_of (thread) == pid)
1353 {
1354 if (debug_threads)
1355 debug_printf ("lkop: is last of process %s\n",
1356 target_pid_to_str (entry->id));
1357 return 0;
1358 }
1359
1360 kill_wait_lwp (lwp);
1361 return 0;
1362 }
1363
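/* Kill every LWP of process PID, waiting for each to die, and then
   mourn the process. Returns 0 on success, -1 if PID is not a known
   process. */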
1364 static int
1365 linux_kill (int pid)
1366 {
1367 struct process_info *process;
1368 struct lwp_info *lwp;
1369
1370 process = find_process_pid (pid);
1371 if (process == NULL)
1372 return -1;
1373
1374 /* If we're killing a running inferior, make sure it is stopped
1375 first, as PTRACE_KILL will not work otherwise. */
1376 stop_all_lwps (0, NULL);
1377
1378 find_inferior (&all_threads, kill_one_lwp_callback, &pid);
1379
1380 /* See the comment in kill_one_lwp_callback. We did not kill the first
1381 thread in the list, so do so now. */
1382 lwp = find_lwp_pid (pid_to_ptid (pid));
1383
1384 if (lwp == NULL)
1385 {
1386 if (debug_threads)
1387 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1388 pid);
1389 }
1390 else
1391 kill_wait_lwp (lwp);
1392
1393 the_target->mourn (process);
1394
1395 /* Since we presently can only stop all lwps of all processes, we
1396 need to unstop lwps of other processes. */
1397 unstop_all_lwps (0, NULL);
1398 return 0;
1399 }
1400
1401 /* Get pending signal of THREAD, for detaching purposes. This is the
1402 signal the thread last stopped for, which we need to deliver to the
1403 thread when detaching, otherwise, it'd be suppressed/lost. */
1404
1405 static int
1406 get_detach_signal (struct thread_info *thread)
1407 {
1408 enum gdb_signal signo = GDB_SIGNAL_0;
1409 int status;
1410 struct lwp_info *lp = get_thread_lwp (thread);
1411
1412 if (lp->status_pending_p)
1413 status = lp->status_pending;
1414 else
1415 {
1416 /* If the thread had been suspended by gdbserver, and it stopped
1417 cleanly, then it'll have stopped with SIGSTOP. But we don't
1418 want to deliver that SIGSTOP. */
1419 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1420 || thread->last_status.value.sig == GDB_SIGNAL_0)
1421 return 0;
1422
1423 /* Otherwise, we may need to deliver the signal we
1424 intercepted. */
1425 status = lp->last_status;
1426 }
1427
1428 if (!WIFSTOPPED (status))
1429 {
1430 if (debug_threads)
1431 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1432 target_pid_to_str (ptid_of (thread)));
1433 return 0;
1434 }
1435
1436 /* Extended wait statuses aren't real SIGTRAPs. */
1437 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1438 {
1439 if (debug_threads)
1440 debug_printf ("GPS: lwp %s had stopped with extended "
1441 "status: no pending signal\n",
1442 target_pid_to_str (ptid_of (thread)));
1443 return 0;
1444 }
1445
1446 signo = gdb_signal_from_host (WSTOPSIG (status));
1447
1448 if (program_signals_p && !program_signals[signo])
1449 {
1450 if (debug_threads)
1451 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1452 target_pid_to_str (ptid_of (thread)),
1453 gdb_signal_to_string (signo));
1454 return 0;
1455 }
1456 else if (!program_signals_p
1457 /* If we have no way to know which signals GDB does not
1458 want to have passed to the program, assume
1459 SIGTRAP/SIGINT, which is GDB's default. */
1460 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1461 {
1462 if (debug_threads)
1463 debug_printf ("GPS: lwp %s had signal %s, "
1464 "but we don't know if we should pass it. "
1465 "Default to not.\n",
1466 target_pid_to_str (ptid_of (thread)),
1467 gdb_signal_to_string (signo));
1468 return 0;
1469 }
1470 else
1471 {
1472 if (debug_threads)
1473 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1474 target_pid_to_str (ptid_of (thread)),
1475 gdb_signal_to_string (signo));
1476
1477 return WSTOPSIG (status);
1478 }
1479 }
1480
1481 /* Detach from LWP. */
1482
1483 static void
1484 linux_detach_one_lwp (struct lwp_info *lwp)
1485 {
1486 struct thread_info *thread = get_lwp_thread (lwp);
1487 int sig;
1488 int lwpid;
1489
1490 /* If there is a pending SIGSTOP, get rid of it. */
1491 if (lwp->stop_expected)
1492 {
1493 if (debug_threads)
1494 debug_printf ("Sending SIGCONT to %s\n",
1495 target_pid_to_str (ptid_of (thread)));
1496
1497 kill_lwp (lwpid_of (thread), SIGCONT);
1498 lwp->stop_expected = 0;
1499 }
1500
1501 /* Pass on any pending signal for this thread. */
1502 sig = get_detach_signal (thread);
1503
1504 /* Preparing to resume may try to write registers, and fail if the
1505 lwp is zombie. If that happens, ignore the error. We'll handle
1506 it below, when detach fails with ESRCH. */
1507 TRY
1508 {
1509 /* Flush any pending changes to the process's registers. */
1510 regcache_invalidate_thread (thread);
1511
1512 /* Finally, let it resume. */
1513 if (the_low_target.prepare_to_resume != NULL)
1514 the_low_target.prepare_to_resume (lwp);
1515 }
1516 CATCH (ex, RETURN_MASK_ERROR)
1517 {
1518 if (!check_ptrace_stopped_lwp_gone (lwp))
1519 throw_exception (ex);
1520 }
1521 END_CATCH
1522
1523 lwpid = lwpid_of (thread);
1524 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1525 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1526 {
1527 int save_errno = errno;
1528
1529 /* We know the thread exists, so ESRCH must mean the lwp is
1530 zombie. This can happen if one of the already-detached
1531 threads exits the whole thread group. In that case we're
1532 still attached, and must reap the lwp. */
1533 if (save_errno == ESRCH)
1534 {
1535 int ret, status;
1536
1537 ret = my_waitpid (lwpid, &status, __WALL);
1538 if (ret == -1)
1539 {
1540 warning (_("Couldn't reap LWP %d while detaching: %s"),
1541 lwpid, strerror (errno));
1542 }
1543 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1544 {
1545 warning (_("Reaping LWP %d while detaching "
1546 "returned unexpected status 0x%x"),
1547 lwpid, status);
1548 }
1549 }
1550 else
1551 {
1552 error (_("Can't detach %s: %s"),
1553 target_pid_to_str (ptid_of (thread)),
1554 strerror (save_errno));
1555 }
1556 }
1557 else if (debug_threads)
1558 {
1559 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1560 target_pid_to_str (ptid_of (thread)),
1561 strsignal (sig));
1562 }
1563
1564 delete_lwp (lwp);
1565 }
1566
1567 /* Callback for find_inferior. Detaches from non-leader threads of a
1568 given process. */
1569
1570 static int
1571 linux_detach_lwp_callback (struct inferior_list_entry *entry, void *args)
1572 {
1573 struct thread_info *thread = (struct thread_info *) entry;
1574 struct lwp_info *lwp = get_thread_lwp (thread);
1575 int pid = *(int *) args;
1576 int lwpid = lwpid_of (thread);
1577
1578 /* Skip other processes. */
1579 if (ptid_get_pid (entry->id) != pid)
1580 return 0;
1581
1582 /* We don't actually detach from the thread group leader just yet.
1583 If the thread group exits, we must reap the zombie clone lwps
1584 before we're able to reap the leader. */
1585 if (ptid_get_pid (entry->id) == lwpid)
1586 return 0;
1587
1588 linux_detach_one_lwp (lwp);
1589 return 0;
1590 }
1591
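/* Detach from process PID: finish any ongoing step-over, stop and
   stabilize all threads, then detach every non-leader LWP followed by
   the leader itself. Returns 0 on success, -1 if PID is not a known
   process. */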
1592 static int
1593 linux_detach (int pid)
1594 {
1595 struct process_info *process;
1596 struct lwp_info *main_lwp;
1597
1598 process = find_process_pid (pid);
1599 if (process == NULL)
1600 return -1;
1601
1602 /* If there's a step over already in progress, let it finish first;
1603 otherwise nesting a stabilize_threads operation on top gets real
1604 messy. */
1605 complete_ongoing_step_over ();
1606
1607 /* Stop all threads before detaching. First, ptrace requires that
1608 the thread is stopped to successfully detach. Second, thread_db
1609 may need to uninstall thread event breakpoints from memory, which
1610 only works with a stopped process anyway. */
1611 stop_all_lwps (0, NULL);
1612
1613 #ifdef USE_THREAD_DB
1614 thread_db_detach (process);
1615 #endif
1616
1617 /* Stabilize threads (move out of jump pads). */
1618 stabilize_threads ();
1619
1620 /* Detach from the clone lwps first. If the thread group exits just
1621 while we're detaching, we must reap the clone lwps before we're
1622 able to reap the leader. */
1623 find_inferior (&all_threads, linux_detach_lwp_callback, &pid);
1624
1625 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1626 linux_detach_one_lwp (main_lwp);
1627
1628 the_target->mourn (process);
1629
1630 /* Since we presently can only stop all lwps of all processes, we
1631 need to unstop lwps of other processes. */
1632 unstop_all_lwps (0, NULL);
1633 return 0;
1634 }
1635
1636 /* Remove all LWPs that belong to process PROC from the lwp list. */
1637
1638 static int
1639 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1640 {
1641 struct thread_info *thread = (struct thread_info *) entry;
1642 struct lwp_info *lwp = get_thread_lwp (thread);
1643 struct process_info *process = (struct process_info *) proc;
1644
1645 if (pid_of (thread) == pid_of (process))
1646 delete_lwp (lwp);
1647
1648 return 0;
1649 }
1650
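/* Release the resources held for PROCESS: delete its LWPs, free its
   private data, and remove the process entry. */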
1651 static void
1652 linux_mourn (struct process_info *process)
1653 {
1654 struct process_info_private *priv;
1655
1656 #ifdef USE_THREAD_DB
1657 thread_db_mourn (process);
1658 #endif
1659
1660 find_inferior (&all_threads, delete_lwp_callback, process);
1661
1662 /* Free all private data. */
1663 priv = process->priv;
1664 free (priv->arch_private);
1665 free (priv);
1666 process->priv = NULL;
1667
1668 remove_process (process);
1669 }
1670
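/* Wait for process PID until it exits or is killed, or until waitpid
   reports that there is no such child left to wait for. */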
1671 static void
1672 linux_join (int pid)
1673 {
1674 int status, ret;
1675
1676 do {
1677 ret = my_waitpid (pid, &status, 0);
1678 if (WIFEXITED (status) || WIFSIGNALED (status))
1679 break;
1680 } while (ret != -1 || errno != ECHILD);
1681 }
1682
1683 /* Return nonzero if the given thread is still alive. */
1684 static int
1685 linux_thread_alive (ptid_t ptid)
1686 {
1687 struct lwp_info *lwp = find_lwp_pid (ptid);
1688
1689 /* We assume we always know if a thread exits. If a whole process
1690 exited but we still haven't been able to report it to GDB, we'll
1691 hold on to the last lwp of the dead process. */
1692 if (lwp != NULL)
1693 return !lwp_is_marked_dead (lwp);
1694 else
1695 return 0;
1696 }
1697
1698 /* Return 1 if this lwp still has an interesting status pending. If
1699 not (e.g., it had stopped for a breakpoint that is gone), return
1700 0. */
1701
1702 static int
1703 thread_still_has_status_pending_p (struct thread_info *thread)
1704 {
1705 struct lwp_info *lp = get_thread_lwp (thread);
1706
1707 if (!lp->status_pending_p)
1708 return 0;
1709
1710 if (thread->last_resume_kind != resume_stop
1711 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1712 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1713 {
1714 struct thread_info *saved_thread;
1715 CORE_ADDR pc;
1716 int discard = 0;
1717
1718 gdb_assert (lp->last_status != 0);
1719
1720 pc = get_pc (lp);
1721
1722 saved_thread = current_thread;
1723 current_thread = thread;
1724
1725 if (pc != lp->stop_pc)
1726 {
1727 if (debug_threads)
1728 debug_printf ("PC of %ld changed\n",
1729 lwpid_of (thread));
1730 discard = 1;
1731 }
1732
1733 #if !USE_SIGTRAP_SIGINFO
1734 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1735 && !(*the_low_target.breakpoint_at) (pc))
1736 {
1737 if (debug_threads)
1738 debug_printf ("previous SW breakpoint of %ld gone\n",
1739 lwpid_of (thread));
1740 discard = 1;
1741 }
1742 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1743 && !hardware_breakpoint_inserted_here (pc))
1744 {
1745 if (debug_threads)
1746 debug_printf ("previous HW breakpoint of %ld gone\n",
1747 lwpid_of (thread));
1748 discard = 1;
1749 }
1750 #endif
1751
1752 current_thread = saved_thread;
1753
1754 if (discard)
1755 {
1756 if (debug_threads)
1757 debug_printf ("discarding pending breakpoint status\n");
1758 lp->status_pending_p = 0;
1759 return 0;
1760 }
1761 }
1762
1763 return 1;
1764 }
1765
1766 /* Returns true if LWP is resumed from the client's perspective. */
1767
1768 static int
1769 lwp_resumed (struct lwp_info *lwp)
1770 {
1771 struct thread_info *thread = get_lwp_thread (lwp);
1772
1773 if (thread->last_resume_kind != resume_stop)
1774 return 1;
1775
1776 /* Did gdb send us a `vCont;t', but we haven't reported the
1777 corresponding stop to gdb yet? If so, the thread is still
1778 resumed/running from gdb's perspective. */
1779 if (thread->last_resume_kind == resume_stop
1780 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1781 return 1;
1782
1783 return 0;
1784 }
1785
1786 /* Return 1 if this lwp has an interesting status pending. */
1787 static int
1788 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1789 {
1790 struct thread_info *thread = (struct thread_info *) entry;
1791 struct lwp_info *lp = get_thread_lwp (thread);
1792 ptid_t ptid = * (ptid_t *) arg;
1793
1794 /* Check if we're only interested in events from a specific process
1795 or a specific LWP. */
1796 if (!ptid_match (ptid_of (thread), ptid))
1797 return 0;
1798
1799 if (!lwp_resumed (lp))
1800 return 0;
1801
1802 if (lp->status_pending_p
1803 && !thread_still_has_status_pending_p (thread))
1804 {
1805 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1806 return 0;
1807 }
1808
1809 return lp->status_pending_p;
1810 }
1811
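/* Callback for find_inferior. Returns 1 if ENTRY's lwpid matches the
   lwp field of the ptid passed in DATA, or its pid if the ptid has no
   lwp component. */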
1812 static int
1813 same_lwp (struct inferior_list_entry *entry, void *data)
1814 {
1815 ptid_t ptid = *(ptid_t *) data;
1816 int lwp;
1817
1818 if (ptid_get_lwp (ptid) != 0)
1819 lwp = ptid_get_lwp (ptid);
1820 else
1821 lwp = ptid_get_pid (ptid);
1822
1823 if (ptid_get_lwp (entry->id) == lwp)
1824 return 1;
1825
1826 return 0;
1827 }
1828
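/* Return the lwp_info whose lwpid matches PTID's lwp field (or PTID's
   pid, if its lwp field is not set), or NULL if no such LWP is
   known. */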
1829 struct lwp_info *
1830 find_lwp_pid (ptid_t ptid)
1831 {
1832 struct inferior_list_entry *thread
1833 = find_inferior (&all_threads, same_lwp, &ptid);
1834
1835 if (thread == NULL)
1836 return NULL;
1837
1838 return get_thread_lwp ((struct thread_info *) thread);
1839 }
1840
1841 /* Return the number of known LWPs in the tgid given by PID. */
1842
1843 static int
1844 num_lwps (int pid)
1845 {
1846 struct inferior_list_entry *inf, *tmp;
1847 int count = 0;
1848
1849 ALL_INFERIORS (&all_threads, inf, tmp)
1850 {
1851 if (ptid_get_pid (inf->id) == pid)
1852 count++;
1853 }
1854
1855 return count;
1856 }
1857
1858 /* The arguments passed to iterate_over_lwps. */
1859
1860 struct iterate_over_lwps_args
1861 {
1862 /* The FILTER argument passed to iterate_over_lwps. */
1863 ptid_t filter;
1864
1865 /* The CALLBACK argument passed to iterate_over_lwps. */
1866 iterate_over_lwps_ftype *callback;
1867
1868 /* The DATA argument passed to iterate_over_lwps. */
1869 void *data;
1870 };
1871
1872 /* Callback for find_inferior used by iterate_over_lwps to filter
1873 calls to the callback supplied to that function. Returning a
1874 nonzero value causes find_inferior to stop iterating and return
1875 the current inferior_list_entry. Returning zero indicates that
1876 find_inferior should continue iterating. */
1877
1878 static int
1879 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1880 {
1881 struct iterate_over_lwps_args *args
1882 = (struct iterate_over_lwps_args *) args_p;
1883
1884 if (ptid_match (entry->id, args->filter))
1885 {
1886 struct thread_info *thr = (struct thread_info *) entry;
1887 struct lwp_info *lwp = get_thread_lwp (thr);
1888
1889 return (*args->callback) (lwp, args->data);
1890 }
1891
1892 return 0;
1893 }
1894
1895 /* See nat/linux-nat.h. */
1896
1897 struct lwp_info *
1898 iterate_over_lwps (ptid_t filter,
1899 iterate_over_lwps_ftype callback,
1900 void *data)
1901 {
1902 struct iterate_over_lwps_args args = {filter, callback, data};
1903 struct inferior_list_entry *entry;
1904
1905 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1906 if (entry == NULL)
1907 return NULL;
1908
1909 return get_thread_lwp ((struct thread_info *) entry);
1910 }
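/* For illustration, a hypothetical iterate_over_lwps callback; the
   name find_stopped_cb is made up, but the signature follows
   iterate_over_lwps_ftype.  Returning nonzero stops the walk and
   makes iterate_over_lwps return that LWP:

     static int
     find_stopped_cb (struct lwp_info *lwp, void *data)
     {
       return lwp->stopped;
     }

     struct lwp_info *stopped
       = iterate_over_lwps (minus_one_ptid, find_stopped_cb, NULL);

   This is a sketch only; a NULL result means no LWP matched.  */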
1911
1912 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1913 their exits until all other threads in the group have exited. */
1914
1915 static void
1916 check_zombie_leaders (void)
1917 {
1918 struct process_info *proc, *tmp;
1919
1920 ALL_PROCESSES (proc, tmp)
1921 {
1922 pid_t leader_pid = pid_of (proc);
1923 struct lwp_info *leader_lp;
1924
1925 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1926
1927 if (debug_threads)
1928 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1929 "num_lwps=%d, zombie=%d\n",
1930 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1931 linux_proc_pid_is_zombie (leader_pid));
1932
1933 if (leader_lp != NULL && !leader_lp->stopped
1934 /* Check if there are other threads in the group, as we may
1935 have raced with the inferior simply exiting. */
1936 && !last_thread_of_process_p (leader_pid)
1937 && linux_proc_pid_is_zombie (leader_pid))
1938 {
1939 /* A leader zombie can mean one of two things:
1940
1941 - It exited, and there's an exit status pending to be
1942 collected, or only the leader exited (not the whole
1943 program). In the latter case, we can't waitpid the
1944 leader's exit status until all other threads are gone.
1945
1946 - There are 3 or more threads in the group, and a thread
1947 other than the leader exec'd. On an exec, the Linux
1948 kernel destroys all other threads (except the execing
1949 one) in the thread group, and resets the execing thread's
1950 tid to the tgid. No exit notification is sent for the
1951 execing thread -- from the ptracer's perspective, it
1952 appears as though the execing thread just vanishes.
1953 Until we reap all other threads except the leader and the
1954 execing thread, the leader will be zombie, and the
1955 execing thread will be in `D (disc sleep)'. As soon as
1956 all other threads are reaped, the execing thread changes
1957 its tid to the tgid, and the previous (zombie) leader
1958 vanishes, giving way to the "new" leader. We could try
1959 distinguishing the exit and exec cases, by waiting once
1960 more, and seeing if something comes out, but it doesn't
1961 sound useful. The previous leader _does_ go away, and
1962 we'll re-add the new one once we see the exec event
1963 (which is just the same as what would happen if the
1964 previous leader did exit voluntarily before some other
1965 thread execs). */
1966
1967 if (debug_threads)
1968 fprintf (stderr,
1969 "CZL: Thread group leader %d zombie "
1970 "(it exited, or another thread execd).\n",
1971 leader_pid);
1972
1973 delete_lwp (leader_lp);
1974 }
1975 }
1976 }
1977
1978 /* Callback for `find_inferior'. Returns the first LWP that is not
1979 stopped. ARG is a PTID filter. */
1980
1981 static int
1982 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1983 {
1984 struct thread_info *thr = (struct thread_info *) entry;
1985 struct lwp_info *lwp;
1986 ptid_t filter = *(ptid_t *) arg;
1987
1988 if (!ptid_match (ptid_of (thr), filter))
1989 return 0;
1990
1991 lwp = get_thread_lwp (thr);
1992 if (!lwp->stopped)
1993 return 1;
1994
1995 return 0;
1996 }
1997
1998 /* Increment LWP's suspend count. */
1999
2000 static void
2001 lwp_suspended_inc (struct lwp_info *lwp)
2002 {
2003 lwp->suspended++;
2004
2005 if (debug_threads && lwp->suspended > 4)
2006 {
2007 struct thread_info *thread = get_lwp_thread (lwp);
2008
2009 debug_printf ("LWP %ld has a suspiciously high suspend count,"
2010 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
2011 }
2012 }
2013
2014 /* Decrement LWP's suspend count. */
2015
2016 static void
2017 lwp_suspended_decr (struct lwp_info *lwp)
2018 {
2019 lwp->suspended--;
2020
2021 if (lwp->suspended < 0)
2022 {
2023 struct thread_info *thread = get_lwp_thread (lwp);
2024
2025 internal_error (__FILE__, __LINE__,
2026 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
2027 lwp->suspended);
2028 }
2029 }
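/* For illustration, these two helpers bracket regions in which the
   LWP must not be resumed behind our back, as handle_tracepoints
   does below:

     lwp_suspended_inc (lwp);
     ... work that may stop and unstop all threads ...
     lwp_suspended_decr (lwp);

   An unbalanced decrement trips the internal_error above.  */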
2030
2031 /* This function should only be called if the LWP got a SIGTRAP.
2032
2033 Handle any tracepoint steps or hits. Return 1 if a tracepoint
2034 event was handled, 0 otherwise. */
2035
2036 static int
2037 handle_tracepoints (struct lwp_info *lwp)
2038 {
2039 struct thread_info *tinfo = get_lwp_thread (lwp);
2040 int tpoint_related_event = 0;
2041
2042 gdb_assert (lwp->suspended == 0);
2043
2044 /* If this tracepoint hit causes a tracing stop, we'll immediately
2045 uninsert tracepoints. To do this, we temporarily pause all
2046 threads, unpatch away, and then unpause threads. We need to make
2047 sure the unpausing doesn't resume LWP too. */
2048 lwp_suspended_inc (lwp);
2049
2050 /* And we need to be sure that any all-threads-stopping doesn't try
2051 to move threads out of the jump pads, as it could deadlock the
2052 inferior (LWP could be in the jump pad, maybe even holding the
2053 lock). */
2054
2055 /* Do any necessary step collect actions. */
2056 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2057
2058 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2059
2060 /* See if we just hit a tracepoint and do its main collect
2061 actions. */
2062 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2063
2064 lwp_suspended_decr (lwp);
2065
2066 gdb_assert (lwp->suspended == 0);
2067 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
2068
2069 if (tpoint_related_event)
2070 {
2071 if (debug_threads)
2072 debug_printf ("got a tracepoint event\n");
2073 return 1;
2074 }
2075
2076 return 0;
2077 }
2078
2079 /* Convenience wrapper. Returns true if LWP is presently collecting a
2080 fast tracepoint. */
2081
2082 static int
2083 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2084 struct fast_tpoint_collect_status *status)
2085 {
2086 CORE_ADDR thread_area;
2087 struct thread_info *thread = get_lwp_thread (lwp);
2088
2089 if (the_low_target.get_thread_area == NULL)
2090 return 0;
2091
2092 /* Get the thread area address. This is used to recognize which
2093 thread is which when tracing with the in-process agent library.
2094 We don't read anything from the address, and treat it as opaque;
2095 it's the address itself that we assume is unique per-thread. */
2096 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2097 return 0;
2098
2099 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2100 }
2101
2102 /* The reason we resume in the caller is that we want to be able
2103 to pass lwp->status_pending as WSTAT, and we need to clear
2104 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2105 refuses to resume. */
2106
2107 static int
2108 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2109 {
2110 struct thread_info *saved_thread;
2111
2112 saved_thread = current_thread;
2113 current_thread = get_lwp_thread (lwp);
2114
2115 if ((wstat == NULL
2116 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2117 && supports_fast_tracepoints ()
2118 && agent_loaded_p ())
2119 {
2120 struct fast_tpoint_collect_status status;
2121 int r;
2122
2123 if (debug_threads)
2124 debug_printf ("Checking whether LWP %ld needs to move out of the "
2125 "jump pad.\n",
2126 lwpid_of (current_thread));
2127
2128 r = linux_fast_tracepoint_collecting (lwp, &status);
2129
2130 if (wstat == NULL
2131 || (WSTOPSIG (*wstat) != SIGILL
2132 && WSTOPSIG (*wstat) != SIGFPE
2133 && WSTOPSIG (*wstat) != SIGSEGV
2134 && WSTOPSIG (*wstat) != SIGBUS))
2135 {
2136 lwp->collecting_fast_tracepoint = r;
2137
2138 if (r != 0)
2139 {
2140 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
2141 {
2142 /* Haven't executed the original instruction yet.
2143 Set breakpoint there, and wait till it's hit,
2144 then single-step until exiting the jump pad. */
2145 lwp->exit_jump_pad_bkpt
2146 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2147 }
2148
2149 if (debug_threads)
2150 debug_printf ("Checking whether LWP %ld needs to move out of "
2151 "the jump pad...it does\n",
2152 lwpid_of (current_thread));
2153 current_thread = saved_thread;
2154
2155 return 1;
2156 }
2157 }
2158 else
2159 {
2160 /* If we get a synchronous signal while collecting, *and*
2161 while executing the (relocated) original instruction,
2162 reset the PC to point at the tpoint address, before
2163 reporting to GDB. Otherwise, it's an IPA lib bug: just
2164 report the signal to GDB, and pray for the best. */
2165
2166 lwp->collecting_fast_tracepoint = 0;
2167
2168 if (r != 0
2169 && (status.adjusted_insn_addr <= lwp->stop_pc
2170 && lwp->stop_pc < status.adjusted_insn_addr_end))
2171 {
2172 siginfo_t info;
2173 struct regcache *regcache;
2174
2175 /* The si_addr on a few signals references the address
2176 of the faulting instruction. Adjust that as
2177 well. */
2178 if ((WSTOPSIG (*wstat) == SIGILL
2179 || WSTOPSIG (*wstat) == SIGFPE
2180 || WSTOPSIG (*wstat) == SIGBUS
2181 || WSTOPSIG (*wstat) == SIGSEGV)
2182 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2183 (PTRACE_TYPE_ARG3) 0, &info) == 0
2184 /* Final check just to make sure we don't clobber
2185 the siginfo of non-kernel-sent signals. */
2186 && (uintptr_t) info.si_addr == lwp->stop_pc)
2187 {
2188 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2189 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2190 (PTRACE_TYPE_ARG3) 0, &info);
2191 }
2192
2193 regcache = get_thread_regcache (current_thread, 1);
2194 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2195 lwp->stop_pc = status.tpoint_addr;
2196
2197 /* Cancel any fast tracepoint lock this thread was
2198 holding. */
2199 force_unlock_trace_buffer ();
2200 }
2201
2202 if (lwp->exit_jump_pad_bkpt != NULL)
2203 {
2204 if (debug_threads)
2205 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2206 "stopping all threads momentarily.\n");
2207
2208 stop_all_lwps (1, lwp);
2209
2210 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2211 lwp->exit_jump_pad_bkpt = NULL;
2212
2213 unstop_all_lwps (1, lwp);
2214
2215 gdb_assert (lwp->suspended >= 0);
2216 }
2217 }
2218 }
2219
2220 if (debug_threads)
2221 debug_printf ("Checking whether LWP %ld needs to move out of the "
2222 "jump pad...no\n",
2223 lwpid_of (current_thread));
2224
2225 current_thread = saved_thread;
2226 return 0;
2227 }
2228
2229 /* Enqueue one signal in the "signals to report later when out of the
2230 jump pad" list. */
2231
2232 static void
2233 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2234 {
2235 struct pending_signals *p_sig;
2236 struct thread_info *thread = get_lwp_thread (lwp);
2237
2238 if (debug_threads)
2239 debug_printf ("Deferring signal %d for LWP %ld.\n",
2240 WSTOPSIG (*wstat), lwpid_of (thread));
2241
2242 if (debug_threads)
2243 {
2244 struct pending_signals *sig;
2245
2246 for (sig = lwp->pending_signals_to_report;
2247 sig != NULL;
2248 sig = sig->prev)
2249 debug_printf (" Already queued %d\n",
2250 sig->signal);
2251
2252 debug_printf (" (no more currently queued signals)\n");
2253 }
2254
2255 /* Don't enqueue non-RT signals if they are already in the deferred
2256 queue. (SIGSTOP being the easiest signal to see ending up here
2257 twice) */
2258 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2259 {
2260 struct pending_signals *sig;
2261
2262 for (sig = lwp->pending_signals_to_report;
2263 sig != NULL;
2264 sig = sig->prev)
2265 {
2266 if (sig->signal == WSTOPSIG (*wstat))
2267 {
2268 if (debug_threads)
2269 debug_printf ("Not requeuing already queued non-RT signal %d"
2270 " for LWP %ld\n",
2271 sig->signal,
2272 lwpid_of (thread));
2273 return;
2274 }
2275 }
2276 }
2277
2278 p_sig = XCNEW (struct pending_signals);
2279 p_sig->prev = lwp->pending_signals_to_report;
2280 p_sig->signal = WSTOPSIG (*wstat);
2281
2282 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2283 &p_sig->info);
2284
2285 lwp->pending_signals_to_report = p_sig;
2286 }
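/* For illustration, the deferred-signal list is chained through the
   PREV field with the newest entry at the head; after deferring
   SIGUSR1 and then SIGUSR2 it looks like:

     lwp->pending_signals_to_report -> {SIGUSR2} -> {SIGUSR1} -> NULL

   dequeue_one_deferred_signal below therefore walks to the tail, so
   signals are re-reported in the order they were deferred (FIFO).  */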
2287
2288 /* Dequeue one signal from the "signals to report later when out of
2289 the jump pad" list. */
2290
2291 static int
2292 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2293 {
2294 struct thread_info *thread = get_lwp_thread (lwp);
2295
2296 if (lwp->pending_signals_to_report != NULL)
2297 {
2298 struct pending_signals **p_sig;
2299
2300 p_sig = &lwp->pending_signals_to_report;
2301 while ((*p_sig)->prev != NULL)
2302 p_sig = &(*p_sig)->prev;
2303
2304 *wstat = W_STOPCODE ((*p_sig)->signal);
2305 if ((*p_sig)->info.si_signo != 0)
2306 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2307 &(*p_sig)->info);
2308 free (*p_sig);
2309 *p_sig = NULL;
2310
2311 if (debug_threads)
2312 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2313 WSTOPSIG (*wstat), lwpid_of (thread));
2314
2315 if (debug_threads)
2316 {
2317 struct pending_signals *sig;
2318
2319 for (sig = lwp->pending_signals_to_report;
2320 sig != NULL;
2321 sig = sig->prev)
2322 debug_printf (" Still queued %d\n",
2323 sig->signal);
2324
2325 debug_printf (" (no more queued signals)\n");
2326 }
2327
2328 return 1;
2329 }
2330
2331 return 0;
2332 }
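/* For illustration, W_STOPCODE manufactures a raw wait status from a
   signal number so that the standard macros round-trip; with the
   usual glibc encoding, W_STOPCODE (sig) is ((sig) << 8 | 0x7f), so
   in a made-up example:

     int wstat = W_STOPCODE (SIGUSR1);
     gdb_assert (WIFSTOPPED (wstat));
     gdb_assert (WSTOPSIG (wstat) == SIGUSR1);

   This lets a dequeued status flow through the same WIFSTOPPED /
   WSTOPSIG paths as a status fresh from waitpid.  */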
2333
2334 /* Fetch the possibly triggered data watchpoint info and store it in
2335 CHILD.
2336
2337 On some archs, like x86, that use debug registers to set
2338 watchpoints, it's possible that the way to know which watched
2339 address trapped is to check the register that is used to select
2340 which address to watch. Problem is, between setting the watchpoint
2341 and reading back which data address trapped, the user may change
2342 the set of watchpoints, and, as a consequence, GDB changes the
2343 debug registers in the inferior. To avoid reading back a stale
2344 stopped-data-address when that happens, we cache in LP the fact
2345 that a watchpoint trapped, and the corresponding data address, as
2346 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2347 registers meanwhile, we have the cached data we can rely on. */
2348
2349 static int
2350 check_stopped_by_watchpoint (struct lwp_info *child)
2351 {
2352 if (the_low_target.stopped_by_watchpoint != NULL)
2353 {
2354 struct thread_info *saved_thread;
2355
2356 saved_thread = current_thread;
2357 current_thread = get_lwp_thread (child);
2358
2359 if (the_low_target.stopped_by_watchpoint ())
2360 {
2361 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2362
2363 if (the_low_target.stopped_data_address != NULL)
2364 child->stopped_data_address
2365 = the_low_target.stopped_data_address ();
2366 else
2367 child->stopped_data_address = 0;
2368 }
2369
2370 current_thread = saved_thread;
2371 }
2372
2373 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2374 }
2375
2376 /* Return the ptrace options that we want to try to enable. */
2377
2378 static int
2379 linux_low_ptrace_options (int attached)
2380 {
2381 int options = 0;
2382
2383 if (!attached)
2384 options |= PTRACE_O_EXITKILL;
2385
2386 if (report_fork_events)
2387 options |= PTRACE_O_TRACEFORK;
2388
2389 if (report_vfork_events)
2390 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2391
2392 if (report_exec_events)
2393 options |= PTRACE_O_TRACEEXEC;
2394
2395 options |= PTRACE_O_TRACESYSGOOD;
2396
2397 return options;
2398 }
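/* For illustration, for a process that gdbserver spawned itself,
   with fork, vfork and exec reporting all enabled, the function
   above computes:

     PTRACE_O_EXITKILL | PTRACE_O_TRACEFORK
       | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE
       | PTRACE_O_TRACEEXEC | PTRACE_O_TRACESYSGOOD

   An attached process drops only PTRACE_O_EXITKILL, so the inferior
   survives if gdbserver itself dies.  */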
2399
2400 /* Do low-level handling of the event, and check if we should go on
2401 and pass it to caller code. Return the affected lwp if so, or
2402 NULL otherwise. */
2403
2404 static struct lwp_info *
2405 linux_low_filter_event (int lwpid, int wstat)
2406 {
2407 struct lwp_info *child;
2408 struct thread_info *thread;
2409 int have_stop_pc = 0;
2410
2411 child = find_lwp_pid (pid_to_ptid (lwpid));
2412
2413 /* Check for stop events reported by a process we didn't already
2414 know about - anything not already in our LWP list.
2415
2416 If we're expecting to receive stopped processes after
2417 fork, vfork, and clone events, then we'll just add the
2418 new one to our list and go back to waiting for the event
2419 to be reported - the stopped process might be returned
2420 from waitpid before or after the event is.
2421
2422 But note the case of a non-leader thread exec'ing after the
2423 leader having exited, and gone from our lists (because
2424 check_zombie_leaders deleted it). The non-leader thread
2425 changes its tid to the tgid. */
2426
2427 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2428 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2429 {
2430 ptid_t child_ptid;
2431
2432 /* A multi-thread exec after we had seen the leader exiting. */
2433 if (debug_threads)
2434 {
2435 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2436 "after exec.\n", lwpid);
2437 }
2438
2439 child_ptid = ptid_build (lwpid, lwpid, 0);
2440 child = add_lwp (child_ptid);
2441 child->stopped = 1;
2442 current_thread = child->thread;
2443 }
2444
2445 /* If we didn't find a process, one of two things presumably happened:
2446 - A process we started and then detached from has exited. Ignore it.
2447 - A process we are controlling has forked and the new child's stop
2448 was reported to us by the kernel. Save its PID. */
2449 if (child == NULL && WIFSTOPPED (wstat))
2450 {
2451 add_to_pid_list (&stopped_pids, lwpid, wstat);
2452 return NULL;
2453 }
2454 else if (child == NULL)
2455 return NULL;
2456
2457 thread = get_lwp_thread (child);
2458
2459 child->stopped = 1;
2460
2461 child->last_status = wstat;
2462
2463 /* Check if the thread has exited. */
2464 if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
2465 {
2466 if (debug_threads)
2467 debug_printf ("LLFE: %d exited.\n", lwpid);
2468
2469 if (finish_step_over (child))
2470 {
2471 /* Unsuspend all other LWPs, and set them back running again. */
2472 unsuspend_all_lwps (child);
2473 }
2474
2475 /* If there is at least one more LWP, then the exit signal was
2476 not the end of the debugged application and should be
2477 ignored, unless GDB wants to hear about thread exits. */
2478 if (report_thread_events
2479 || last_thread_of_process_p (pid_of (thread)))
2480 {
2481 /* Since events are serialized to GDB core, we can't report
2482 this one right now. Leave the status pending for
2483 the next time we're able to report it. */
2484 mark_lwp_dead (child, wstat);
2485 return child;
2486 }
2487 else
2488 {
2489 delete_lwp (child);
2490 return NULL;
2491 }
2492 }
2493
2494 gdb_assert (WIFSTOPPED (wstat));
2495
2496 if (WIFSTOPPED (wstat))
2497 {
2498 struct process_info *proc;
2499
2500 /* Architecture-specific setup after inferior is running. */
2501 proc = find_process_pid (pid_of (thread));
2502 if (proc->tdesc == NULL)
2503 {
2504 if (proc->attached)
2505 {
2506 /* This needs to happen after we have attached to the
2507 inferior and it is stopped for the first time, but
2508 before we access any inferior registers. */
2509 linux_arch_setup_thread (thread);
2510 }
2511 else
2512 {
2513 /* The process is started, but GDBserver will do
2514 architecture-specific setup after the program stops at
2515 the first instruction. */
2516 child->status_pending_p = 1;
2517 child->status_pending = wstat;
2518 return child;
2519 }
2520 }
2521 }
2522
2523 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2524 {
2525 struct process_info *proc = find_process_pid (pid_of (thread));
2526 int options = linux_low_ptrace_options (proc->attached);
2527
2528 linux_enable_event_reporting (lwpid, options);
2529 child->must_set_ptrace_flags = 0;
2530 }
2531
2532 /* Always update syscall_state, even if it will be filtered later. */
2533 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2534 {
2535 child->syscall_state
2536 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2537 ? TARGET_WAITKIND_SYSCALL_RETURN
2538 : TARGET_WAITKIND_SYSCALL_ENTRY);
2539 }
2540 else
2541 {
2542 /* Almost all other ptrace-stops are known to be outside of system
2543 calls, with further exceptions in handle_extended_wait. */
2544 child->syscall_state = TARGET_WAITKIND_IGNORE;
2545 }
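/* For illustration, with PTRACE_O_TRACESYSGOOD in effect each
   syscall produces a pair of SYSCALL_SIGTRAP stops, so the state
   simply alternates:

     IGNORE -> SYSCALL_ENTRY -> SYSCALL_RETURN -> SYSCALL_ENTRY ...

   and any non-syscall stop in between resets it to IGNORE above.  */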
2546
2547 /* Be careful to not overwrite stop_pc until save_stop_reason is
2548 called. */
2549 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2550 && linux_is_extended_waitstatus (wstat))
2551 {
2552 child->stop_pc = get_pc (child);
2553 if (handle_extended_wait (&child, wstat))
2554 {
2555 /* The event has been handled, so just return without
2556 reporting it. */
2557 return NULL;
2558 }
2559 }
2560
2561 if (linux_wstatus_maybe_breakpoint (wstat))
2562 {
2563 if (save_stop_reason (child))
2564 have_stop_pc = 1;
2565 }
2566
2567 if (!have_stop_pc)
2568 child->stop_pc = get_pc (child);
2569
2570 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2571 && child->stop_expected)
2572 {
2573 if (debug_threads)
2574 debug_printf ("Expected stop.\n");
2575 child->stop_expected = 0;
2576
2577 if (thread->last_resume_kind == resume_stop)
2578 {
2579 /* We want to report the stop to the core. Treat the
2580 SIGSTOP as a normal event. */
2581 if (debug_threads)
2582 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2583 target_pid_to_str (ptid_of (thread)));
2584 }
2585 else if (stopping_threads != NOT_STOPPING_THREADS)
2586 {
2587 /* Stopping threads. We don't want this SIGSTOP to end up
2588 pending. */
2589 if (debug_threads)
2590 debug_printf ("LLW: SIGSTOP caught for %s "
2591 "while stopping threads.\n",
2592 target_pid_to_str (ptid_of (thread)));
2593 return NULL;
2594 }
2595 else
2596 {
2597 /* This is a delayed SIGSTOP. Filter out the event. */
2598 if (debug_threads)
2599 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2600 child->stepping ? "step" : "continue",
2601 target_pid_to_str (ptid_of (thread)));
2602
2603 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2604 return NULL;
2605 }
2606 }
2607
2608 child->status_pending_p = 1;
2609 child->status_pending = wstat;
2610 return child;
2611 }
2612
2613 /* Return true if THREAD is doing hardware single step. */
2614
2615 static int
2616 maybe_hw_step (struct thread_info *thread)
2617 {
2618 if (can_hardware_single_step ())
2619 return 1;
2620 else
2621 {
2622 /* GDBserver must insert a reinsert breakpoint for software
2623 single step. */
2624 gdb_assert (has_reinsert_breakpoints (thread));
2625 return 0;
2626 }
2627 }
2628
2629 /* Resume LWPs that are currently stopped without any pending status
2630 to report, but are resumed from the core's perspective. */
2631
2632 static void
2633 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2634 {
2635 struct thread_info *thread = (struct thread_info *) entry;
2636 struct lwp_info *lp = get_thread_lwp (thread);
2637
2638 if (lp->stopped
2639 && !lp->suspended
2640 && !lp->status_pending_p
2641 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2642 {
2643 int step = thread->last_resume_kind == resume_step;
2644
2645 if (debug_threads)
2646 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2647 target_pid_to_str (ptid_of (thread)),
2648 paddress (lp->stop_pc),
2649 step);
2650
2651 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2652 }
2653 }
2654
2655 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2656 match FILTER_PTID (leaving others pending). The PTIDs can be:
2657 minus_one_ptid, to specify any child; a pid PTID, specifying all
2658 lwps of a thread group; or a PTID representing a single lwp. Store
2659 the stop status through the status pointer WSTAT. OPTIONS is
2660 passed to the waitpid call. Return 0 if no event was found and
2661 OPTIONS contains WNOHANG. Return -1 if no unwaited-for child
2662 was found. Return the PID of the stopped child otherwise. */
2663
2664 static int
2665 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2666 int *wstatp, int options)
2667 {
2668 struct thread_info *event_thread;
2669 struct lwp_info *event_child, *requested_child;
2670 sigset_t block_mask, prev_mask;
2671
2672 retry:
2673 /* N.B. event_thread points to the thread_info struct that contains
2674 event_child. Keep them in sync. */
2675 event_thread = NULL;
2676 event_child = NULL;
2677 requested_child = NULL;
2678
2679 /* Check for a lwp with a pending status. */
2680
2681 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2682 {
2683 event_thread = (struct thread_info *)
2684 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2685 if (event_thread != NULL)
2686 event_child = get_thread_lwp (event_thread);
2687 if (debug_threads && event_thread)
2688 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2689 }
2690 else if (!ptid_equal (filter_ptid, null_ptid))
2691 {
2692 requested_child = find_lwp_pid (filter_ptid);
2693
2694 if (stopping_threads == NOT_STOPPING_THREADS
2695 && requested_child->status_pending_p
2696 && requested_child->collecting_fast_tracepoint)
2697 {
2698 enqueue_one_deferred_signal (requested_child,
2699 &requested_child->status_pending);
2700 requested_child->status_pending_p = 0;
2701 requested_child->status_pending = 0;
2702 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2703 }
2704
2705 if (requested_child->suspended
2706 && requested_child->status_pending_p)
2707 {
2708 internal_error (__FILE__, __LINE__,
2709 "requesting an event out of a"
2710 " suspended child?");
2711 }
2712
2713 if (requested_child->status_pending_p)
2714 {
2715 event_child = requested_child;
2716 event_thread = get_lwp_thread (event_child);
2717 }
2718 }
2719
2720 if (event_child != NULL)
2721 {
2722 if (debug_threads)
2723 debug_printf ("Got an event from pending child %ld (%04x)\n",
2724 lwpid_of (event_thread), event_child->status_pending);
2725 *wstatp = event_child->status_pending;
2726 event_child->status_pending_p = 0;
2727 event_child->status_pending = 0;
2728 current_thread = event_thread;
2729 return lwpid_of (event_thread);
2730 }
2731
2732 /* But if we don't find a pending event, we'll have to wait.
2733
2734 We only enter this loop if no process has a pending wait status.
2735 Thus any action taken in response to a wait status inside this
2736 loop is responding as soon as we detect the status, not after any
2737 pending events. */
2738
2739 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2740 all signals while here. */
2741 sigfillset (&block_mask);
2742 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2743
2744 /* Always pull all events out of the kernel. We'll randomly select
2745 an event LWP out of all that have events, to prevent
2746 starvation. */
2747 while (event_child == NULL)
2748 {
2749 pid_t ret = 0;
2750
2751 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2752 quirks:
2753
2754 - If the thread group leader exits while other threads in the
2755 thread group still exist, waitpid(TGID, ...) hangs. That
2756 waitpid won't return an exit status until the other threads
2757 in the group are reaped.
2758
2759 - When a non-leader thread execs, that thread just vanishes
2760 without reporting an exit (so we'd hang if we waited for it
2761 explicitly in that case). The exec event is reported to
2762 the TGID pid. */
2763 errno = 0;
2764 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2765
2766 if (debug_threads)
2767 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2768 ret, errno ? strerror (errno) : "ERRNO-OK");
2769
2770 if (ret > 0)
2771 {
2772 if (debug_threads)
2773 {
2774 debug_printf ("LLW: waitpid %ld received %s\n",
2775 (long) ret, status_to_str (*wstatp));
2776 }
2777
2778 /* Filter all events. IOW, leave all events pending. We'll
2779 randomly select an event LWP out of all that have events
2780 below. */
2781 linux_low_filter_event (ret, *wstatp);
2782 /* Retry until nothing comes out of waitpid. A single
2783 SIGCHLD can indicate more than one child stopped. */
2784 continue;
2785 }
2786
2787 /* Now that we've pulled all events out of the kernel, resume
2788 LWPs that don't have an interesting event to report. */
2789 if (stopping_threads == NOT_STOPPING_THREADS)
2790 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2791
2792 /* ... and find an LWP with a status to report to the core, if
2793 any. */
2794 event_thread = (struct thread_info *)
2795 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2796 if (event_thread != NULL)
2797 {
2798 event_child = get_thread_lwp (event_thread);
2799 *wstatp = event_child->status_pending;
2800 event_child->status_pending_p = 0;
2801 event_child->status_pending = 0;
2802 break;
2803 }
2804
2805 /* Check for zombie thread group leaders. Those can't be reaped
2806 until all other threads in the thread group are. */
2807 check_zombie_leaders ();
2808
2809 /* If there are no resumed children left in the set of LWPs we
2810 want to wait for, bail. We can't just block in
2811 waitpid/sigsuspend, because lwps might have been left stopped
2812 in trace-stop state, and we'd be stuck forever waiting for
2813 their status to change (which would only happen if we resumed
2814 them). Even if WNOHANG is set, this return code is preferred
2815 over 0 (below), as it is more detailed. */
2816 if ((find_inferior (&all_threads,
2817 not_stopped_callback,
2818 &wait_ptid) == NULL))
2819 {
2820 if (debug_threads)
2821 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2822 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2823 return -1;
2824 }
2825
2826 /* No interesting event to report to the caller. */
2827 if ((options & WNOHANG))
2828 {
2829 if (debug_threads)
2830 debug_printf ("WNOHANG set, no event found\n");
2831
2832 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2833 return 0;
2834 }
2835
2836 /* Block until we get an event reported with SIGCHLD. */
2837 if (debug_threads)
2838 debug_printf ("sigsuspend'ing\n");
2839
2840 sigsuspend (&prev_mask);
2841 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2842 goto retry;
2843 }
2844
2845 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2846
2847 current_thread = event_thread;
2848
2849 return lwpid_of (event_thread);
2850 }
2851
2852 /* Wait for an event from child(ren) PTID. PTIDs can be:
2853 minus_one_ptid, to specify any child; a pid PTID, specifying all
2854 lwps of a thread group; or a PTID representing a single lwp. Store
2855 the stop status through the status pointer WSTAT. OPTIONS is
2856 passed to the waitpid call. Return 0 if no event was found and
2857 OPTIONS contains WNOHANG. Return -1 if no unwaited-for child
2858 was found. Return the PID of the stopped child otherwise. */
2859
2860 static int
2861 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2862 {
2863 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2864 }
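/* For illustration, linux_wait_1 below drives this in both modes; a
   sketch of its calls:

     pid = linux_wait_for_event (ptid, &w, __WALL | WNOHANG);

   polls and returns 0 right away when nothing is ready, whereas

     pid = linux_wait_for_event (step_over_bkpt, &w, __WALL);

   blocks until the thread doing a step-over reports an event.  */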
2865
2866 /* Count the LWPs that have had events. */
2867
2868 static int
2869 count_events_callback (struct inferior_list_entry *entry, void *data)
2870 {
2871 struct thread_info *thread = (struct thread_info *) entry;
2872 struct lwp_info *lp = get_thread_lwp (thread);
2873 int *count = (int *) data;
2874
2875 gdb_assert (count != NULL);
2876
2877 /* Count only resumed LWPs that have an event pending. */
2878 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2879 && lp->status_pending_p)
2880 (*count)++;
2881
2882 return 0;
2883 }
2884
2885 /* Select the LWP (if any) that is currently being single-stepped. */
2886
2887 static int
2888 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2889 {
2890 struct thread_info *thread = (struct thread_info *) entry;
2891 struct lwp_info *lp = get_thread_lwp (thread);
2892
2893 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2894 && thread->last_resume_kind == resume_step
2895 && lp->status_pending_p)
2896 return 1;
2897 else
2898 return 0;
2899 }
2900
2901 /* Select the Nth LWP that has had an event. */
2902
2903 static int
2904 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2905 {
2906 struct thread_info *thread = (struct thread_info *) entry;
2907 struct lwp_info *lp = get_thread_lwp (thread);
2908 int *selector = (int *) data;
2909
2910 gdb_assert (selector != NULL);
2911
2912 /* Select only resumed LWPs that have an event pending. */
2913 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2914 && lp->status_pending_p)
2915 if ((*selector)-- == 0)
2916 return 1;
2917
2918 return 0;
2919 }
2920
2921 /* Select one LWP out of those that have events pending. */
2922
2923 static void
2924 select_event_lwp (struct lwp_info **orig_lp)
2925 {
2926 int num_events = 0;
2927 int random_selector;
2928 struct thread_info *event_thread = NULL;
2929
2930 /* In all-stop, give preference to the LWP that is being
2931 single-stepped. There will be at most one, and it's the LWP that
2932 the core is most interested in. If we didn't do this, then we'd
2933 have to handle pending step SIGTRAPs somehow in case the core
2934 later continues the previously-stepped thread, otherwise we'd
2935 report the pending SIGTRAP, and the core, not having stepped the
2936 thread, wouldn't understand what the trap was for, and therefore
2937 would report it to the user as a random signal. */
2938 if (!non_stop)
2939 {
2940 event_thread
2941 = (struct thread_info *) find_inferior (&all_threads,
2942 select_singlestep_lwp_callback,
2943 NULL);
2944 if (event_thread != NULL)
2945 {
2946 if (debug_threads)
2947 debug_printf ("SEL: Select single-step %s\n",
2948 target_pid_to_str (ptid_of (event_thread)));
2949 }
2950 }
2951 if (event_thread == NULL)
2952 {
2953 /* No single-stepping LWP. Select one at random, out of those
2954 which have had events. */
2955
2956 /* First see how many events we have. */
2957 find_inferior (&all_threads, count_events_callback, &num_events);
2958 gdb_assert (num_events > 0);
2959
2960 /* Now randomly pick a LWP out of those that have had
2961 events. */
2962 random_selector = (int)
2963 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2964
2965 if (debug_threads && num_events > 1)
2966 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2967 num_events, random_selector);
2968
2969 event_thread
2970 = (struct thread_info *) find_inferior (&all_threads,
2971 select_event_lwp_callback,
2972 &random_selector);
2973 }
2974
2975 if (event_thread != NULL)
2976 {
2977 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2978
2979 /* Switch the event LWP. */
2980 *orig_lp = event_lp;
2981 }
2982 }
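/* For illustration, the scaling expression above maps rand ()
   uniformly onto 0 .. num_events - 1.  E.g., with num_events == 3
   and RAND_MAX == 2147483647:

     rand () == 0             ->  random_selector 0
     rand () == RAND_MAX / 2  ->  random_selector 1
     rand () == RAND_MAX      ->  random_selector 2

   since (3 * (double) rand ()) / (RAND_MAX + 1.0) stays strictly
   below 3; this is the usual idiom for avoiding the bias of
   rand () % num_events.  */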
2983
2984 /* Decrement the suspend count of an LWP. */
2985
2986 static int
2987 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2988 {
2989 struct thread_info *thread = (struct thread_info *) entry;
2990 struct lwp_info *lwp = get_thread_lwp (thread);
2991
2992 /* Ignore EXCEPT. */
2993 if (lwp == except)
2994 return 0;
2995
2996 lwp_suspended_decr (lwp);
2997 return 0;
2998 }
2999
3000 /* Decrement the suspend count of all LWPs, except EXCEPT, if
3001 non-NULL. */
3002
3003 static void
3004 unsuspend_all_lwps (struct lwp_info *except)
3005 {
3006 find_inferior (&all_threads, unsuspend_one_lwp, except);
3007 }
3008
3009 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
3010 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
3011 void *data);
3012 static int lwp_running (struct inferior_list_entry *entry, void *data);
3013 static ptid_t linux_wait_1 (ptid_t ptid,
3014 struct target_waitstatus *ourstatus,
3015 int target_options);
3016
3017 /* Stabilize threads (move out of jump pads).
3018
3019 If a thread is midway collecting a fast tracepoint, we need to
3020 finish the collection and move it out of the jump pad before
3021 reporting the signal.
3022
3023 This avoids recursion while collecting (when a signal arrives
3024 midway, and the signal handler itself collects), which would trash
3025 the trace buffer. In case the user set a breakpoint in a signal
3026 handler, this avoids the backtrace showing the jump pad, etc.
3027 Most importantly, there are certain things we can't do safely if
3028 threads are stopped in a jump pad (or in one of its callees). For
3029 example:
3030
3031 - starting a new trace run. A thread still collecting the
3032 previous run could trash the trace buffer when resumed. The trace
3033 buffer control structures would have been reset but the thread had
3034 no way to tell. The thread could even be midway through memcpy'ing to the
3035 buffer, which would mean that when resumed, it would clobber the
3036 trace buffer that had been set for a new run.
3037
3038 - we can't rewrite/reuse the jump pads for new tracepoints
3039 safely. Say you do tstart while a thread is stopped midway while
3040 collecting. When the thread is later resumed, it finishes the
3041 collection, and returns to the jump pad, to execute the original
3042 instruction that was under the tracepoint jump at the time the
3043 older run had been started. If the jump pad had since been
3044 rewritten for something else in the new run, the thread would now
3045 execute the wrong / random instructions. */
3046
3047 static void
3048 linux_stabilize_threads (void)
3049 {
3050 struct thread_info *saved_thread;
3051 struct thread_info *thread_stuck;
3052
3053 thread_stuck
3054 = (struct thread_info *) find_inferior (&all_threads,
3055 stuck_in_jump_pad_callback,
3056 NULL);
3057 if (thread_stuck != NULL)
3058 {
3059 if (debug_threads)
3060 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
3061 lwpid_of (thread_stuck));
3062 return;
3063 }
3064
3065 saved_thread = current_thread;
3066
3067 stabilizing_threads = 1;
3068
3069 /* Kick 'em all. */
3070 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
3071
3072 /* Loop until all are stopped out of the jump pads. */
3073 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
3074 {
3075 struct target_waitstatus ourstatus;
3076 struct lwp_info *lwp;
3077 int wstat;
3078
3079 /* Note that we go through the full wait event loop. While
3080 moving threads out of jump pad, we need to be able to step
3081 over internal breakpoints and such. */
3082 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
3083
3084 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
3085 {
3086 lwp = get_thread_lwp (current_thread);
3087
3088 /* Lock it. */
3089 lwp_suspended_inc (lwp);
3090
3091 if (ourstatus.value.sig != GDB_SIGNAL_0
3092 || current_thread->last_resume_kind == resume_stop)
3093 {
3094 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3095 enqueue_one_deferred_signal (lwp, &wstat);
3096 }
3097 }
3098 }
3099
3100 unsuspend_all_lwps (NULL);
3101
3102 stabilizing_threads = 0;
3103
3104 current_thread = saved_thread;
3105
3106 if (debug_threads)
3107 {
3108 thread_stuck
3109 = (struct thread_info *) find_inferior (&all_threads,
3110 stuck_in_jump_pad_callback,
3111 NULL);
3112 if (thread_stuck != NULL)
3113 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3114 lwpid_of (thread_stuck));
3115 }
3116 }
3117
3118 /* Convenience function that is called when the kernel reports an
3119 event that is not passed out to GDB. */
3120
3121 static ptid_t
3122 ignore_event (struct target_waitstatus *ourstatus)
3123 {
3124 /* If we got an event, there may still be others, as a single
3125 SIGCHLD can indicate more than one child stopped. This forces
3126 another target_wait call. */
3127 async_file_mark ();
3128
3129 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3130 return null_ptid;
3131 }
3132
3133 /* Convenience function that is called when the kernel reports an exit
3134 event. This decides whether to report the event to GDB as a
3135 process exit event or a thread exit event, or to suppress
3136 it. */
3137
3138 static ptid_t
3139 filter_exit_event (struct lwp_info *event_child,
3140 struct target_waitstatus *ourstatus)
3141 {
3142 struct thread_info *thread = get_lwp_thread (event_child);
3143 ptid_t ptid = ptid_of (thread);
3144
3145 if (!last_thread_of_process_p (pid_of (thread)))
3146 {
3147 if (report_thread_events)
3148 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3149 else
3150 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3151
3152 delete_lwp (event_child);
3153 }
3154 return ptid;
3155 }
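/* For illustration, the filtering above amounts to this decision
   table (the caller has already set *OURSTATUS to
   TARGET_WAITKIND_EXITED):

     last thread?   report_thread_events?   resulting kind
     yes            either                  TARGET_WAITKIND_EXITED
     no             yes                     TARGET_WAITKIND_THREAD_EXITED
     no             no                      TARGET_WAITKIND_IGNORE

   In both "no" rows the LWP is also deleted, as its exit needs no
   further handling.  */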
3156
3157 /* Returns 1 if GDB is interested in any event_child syscalls. */
3158
3159 static int
3160 gdb_catching_syscalls_p (struct lwp_info *event_child)
3161 {
3162 struct thread_info *thread = get_lwp_thread (event_child);
3163 struct process_info *proc = get_thread_process (thread);
3164
3165 return !VEC_empty (int, proc->syscalls_to_catch);
3166 }
3167
3168 /* Returns 1 if GDB is interested in the event_child syscall.
3169 Only to be called when the stop reason is SYSCALL_SIGTRAP. */
3170
3171 static int
3172 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3173 {
3174 int i, iter;
3175 int sysno;
3176 struct thread_info *thread = get_lwp_thread (event_child);
3177 struct process_info *proc = get_thread_process (thread);
3178
3179 if (VEC_empty (int, proc->syscalls_to_catch))
3180 return 0;
3181
3182 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3183 return 1;
3184
3185 get_syscall_trapinfo (event_child, &sysno);
3186 for (i = 0;
3187 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3188 i++)
3189 if (iter == sysno)
3190 return 1;
3191
3192 return 0;
3193 }
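/* For illustration, a catch set of { ANY_SYSCALL } short-circuits
   before the trapinfo lookup and reports every syscall, while a set
   such as { 1, 3 } (made-up numbers) reports only those two.  The
   caller pattern in linux_wait_1 below is:

     if (!gdb_catch_this_syscall_p (event_child))
       ... resume event_child and ignore the event ...

   so uninteresting syscall stops never reach GDB.  */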
3194
3195 /* Wait for an event from the process, and return its status. */
3196
3197 static ptid_t
3198 linux_wait_1 (ptid_t ptid,
3199 struct target_waitstatus *ourstatus, int target_options)
3200 {
3201 int w;
3202 struct lwp_info *event_child;
3203 int options;
3204 int pid;
3205 int step_over_finished;
3206 int bp_explains_trap;
3207 int maybe_internal_trap;
3208 int report_to_gdb;
3209 int trace_event;
3210 int in_step_range;
3211 int any_resumed;
3212
3213 if (debug_threads)
3214 {
3215 debug_enter ();
3216 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3217 }
3218
3219 /* Translate generic target options into linux options. */
3220 options = __WALL;
3221 if (target_options & TARGET_WNOHANG)
3222 options |= WNOHANG;
3223
3224 bp_explains_trap = 0;
3225 trace_event = 0;
3226 in_step_range = 0;
3227 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3228
3229 /* Find a resumed LWP, if any. */
3230 if (find_inferior (&all_threads,
3231 status_pending_p_callback,
3232 &minus_one_ptid) != NULL)
3233 any_resumed = 1;
3234 else if ((find_inferior (&all_threads,
3235 not_stopped_callback,
3236 &minus_one_ptid) != NULL))
3237 any_resumed = 1;
3238 else
3239 any_resumed = 0;
3240
3241 if (ptid_equal (step_over_bkpt, null_ptid))
3242 pid = linux_wait_for_event (ptid, &w, options);
3243 else
3244 {
3245 if (debug_threads)
3246 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3247 target_pid_to_str (step_over_bkpt));
3248 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3249 }
3250
3251 if (pid == 0 || (pid == -1 && !any_resumed))
3252 {
3253 gdb_assert (target_options & TARGET_WNOHANG);
3254
3255 if (debug_threads)
3256 {
3257 debug_printf ("linux_wait_1 ret = null_ptid, "
3258 "TARGET_WAITKIND_IGNORE\n");
3259 debug_exit ();
3260 }
3261
3262 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3263 return null_ptid;
3264 }
3265 else if (pid == -1)
3266 {
3267 if (debug_threads)
3268 {
3269 debug_printf ("linux_wait_1 ret = null_ptid, "
3270 "TARGET_WAITKIND_NO_RESUMED\n");
3271 debug_exit ();
3272 }
3273
3274 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3275 return null_ptid;
3276 }
3277
3278 event_child = get_thread_lwp (current_thread);
3279
3280 /* linux_wait_for_event only returns an exit status for the last
3281 child of a process. Report it. */
3282 if (WIFEXITED (w) || WIFSIGNALED (w))
3283 {
3284 if (WIFEXITED (w))
3285 {
3286 ourstatus->kind = TARGET_WAITKIND_EXITED;
3287 ourstatus->value.integer = WEXITSTATUS (w);
3288
3289 if (debug_threads)
3290 {
3291 debug_printf ("linux_wait_1 ret = %s, exited with "
3292 "retcode %d\n",
3293 target_pid_to_str (ptid_of (current_thread)),
3294 WEXITSTATUS (w));
3295 debug_exit ();
3296 }
3297 }
3298 else
3299 {
3300 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3301 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3302
3303 if (debug_threads)
3304 {
3305 debug_printf ("linux_wait_1 ret = %s, terminated with "
3306 "signal %d\n",
3307 target_pid_to_str (ptid_of (current_thread)),
3308 WTERMSIG (w));
3309 debug_exit ();
3310 }
3311 }
3312
3313 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3314 return filter_exit_event (event_child, ourstatus);
3315
3316 return ptid_of (current_thread);
3317 }
3318
3319 /* If a step-over executed a breakpoint instruction, then in the case
3320 of a hardware single step it means a gdb/gdbserver breakpoint had
3321 been planted on top of a permanent breakpoint, while in the case of
3322 a software single step it may just mean that gdbserver hit the
3323 reinsert breakpoint. The PC has been adjusted by save_stop_reason
3324 to point at the breakpoint address.
3325 So for a hardware single step, advance the PC manually past the
3326 breakpoint; for a software single step, advance it only if the trap
3327 is not the reinsert_breakpoint we are hitting.
3328 This avoids having a program trap on a permanent breakpoint
3329 forever. */
3330 if (!ptid_equal (step_over_bkpt, null_ptid)
3331 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3332 && (event_child->stepping
3333 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
3334 {
3335 int increment_pc = 0;
3336 int breakpoint_kind = 0;
3337 CORE_ADDR stop_pc = event_child->stop_pc;
3338
3339 breakpoint_kind =
3340 the_target->breakpoint_kind_from_current_state (&stop_pc);
3341 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3342
3343 if (debug_threads)
3344 {
3345 debug_printf ("step-over for %s executed software breakpoint\n",
3346 target_pid_to_str (ptid_of (current_thread)));
3347 }
3348
3349 if (increment_pc != 0)
3350 {
3351 struct regcache *regcache
3352 = get_thread_regcache (current_thread, 1);
3353
3354 event_child->stop_pc += increment_pc;
3355 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3356
3357 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3358 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3359 }
3360 }
3361
3362 /* If this event was not handled before, and is not a SIGTRAP, we
3363 report it. SIGILL and SIGSEGV are also treated as traps in case
3364 a breakpoint is inserted at the current PC. If this target does
3365 not support internal breakpoints at all, we also report the
3366 SIGTRAP without further processing; it's of no concern to us. */
3367 maybe_internal_trap
3368 = (supports_breakpoints ()
3369 && (WSTOPSIG (w) == SIGTRAP
3370 || ((WSTOPSIG (w) == SIGILL
3371 || WSTOPSIG (w) == SIGSEGV)
3372 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3373
3374 if (maybe_internal_trap)
3375 {
3376 /* Handle anything that requires bookkeeping before deciding to
3377 report the event or continue waiting. */
3378
3379 /* First check if we can explain the SIGTRAP with an internal
3380 breakpoint, or if we should possibly report the event to GDB.
3381 Do this before anything that may remove or insert a
3382 breakpoint. */
3383 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3384
3385 /* We have a SIGTRAP, possibly a step-over dance has just
3386 finished. If so, tweak the state machine accordingly,
3387 reinsert breakpoints and delete any reinsert (software
3388 single-step) breakpoints. */
3389 step_over_finished = finish_step_over (event_child);
3390
3391 /* Now invoke the callbacks of any internal breakpoints there. */
3392 check_breakpoints (event_child->stop_pc);
3393
3394 /* Handle tracepoint data collecting. This may overflow the
3395 trace buffer, and cause a tracing stop, removing
3396 breakpoints. */
3397 trace_event = handle_tracepoints (event_child);
3398
3399 if (bp_explains_trap)
3400 {
3401 if (debug_threads)
3402 debug_printf ("Hit a gdbserver breakpoint.\n");
3403 }
3404 }
3405 else
3406 {
3407 /* We have some other signal, possibly a step-over dance was in
3408 progress, and it should be cancelled too. */
3409 step_over_finished = finish_step_over (event_child);
3410 }
3411
3412 /* We have all the data we need. Either report the event to GDB, or
3413 resume threads and keep waiting for more. */
3414
3415 /* If we're collecting a fast tracepoint, finish the collection and
3416 move out of the jump pad before delivering a signal. See
3417 linux_stabilize_threads. */
3418
3419 if (WIFSTOPPED (w)
3420 && WSTOPSIG (w) != SIGTRAP
3421 && supports_fast_tracepoints ()
3422 && agent_loaded_p ())
3423 {
3424 if (debug_threads)
3425 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3426 "to defer or adjust it.\n",
3427 WSTOPSIG (w), lwpid_of (current_thread));
3428
3429 /* Allow debugging the jump pad itself. */
3430 if (current_thread->last_resume_kind != resume_step
3431 && maybe_move_out_of_jump_pad (event_child, &w))
3432 {
3433 enqueue_one_deferred_signal (event_child, &w);
3434
3435 if (debug_threads)
3436 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3437 WSTOPSIG (w), lwpid_of (current_thread));
3438
3439 linux_resume_one_lwp (event_child, 0, 0, NULL);
3440
3441 return ignore_event (ourstatus);
3442 }
3443 }
3444
3445 if (event_child->collecting_fast_tracepoint)
3446 {
3447 if (debug_threads)
3448 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3449 "Check if we're already there.\n",
3450 lwpid_of (current_thread),
3451 event_child->collecting_fast_tracepoint);
3452
3453 trace_event = 1;
3454
3455 event_child->collecting_fast_tracepoint
3456 = linux_fast_tracepoint_collecting (event_child, NULL);
3457
3458 if (event_child->collecting_fast_tracepoint != 1)
3459 {
3460 /* No longer need this breakpoint. */
3461 if (event_child->exit_jump_pad_bkpt != NULL)
3462 {
3463 if (debug_threads)
3464 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3465 "stopping all threads momentarily.\n");
3466
3467 /* Other running threads could hit this breakpoint.
3468 We don't handle moribund locations like GDB does,
3469 instead we always pause all threads when removing
3470 breakpoints, so that any step-over or
3471 decr_pc_after_break adjustment is always taken
3472 care of while the breakpoint is still
3473 inserted. */
3474 stop_all_lwps (1, event_child);
3475
3476 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3477 event_child->exit_jump_pad_bkpt = NULL;
3478
3479 unstop_all_lwps (1, event_child);
3480
3481 gdb_assert (event_child->suspended >= 0);
3482 }
3483 }
3484
3485 if (event_child->collecting_fast_tracepoint == 0)
3486 {
3487 if (debug_threads)
3488 debug_printf ("fast tracepoint finished "
3489 "collecting successfully.\n");
3490
3491 /* We may have a deferred signal to report. */
3492 if (dequeue_one_deferred_signal (event_child, &w))
3493 {
3494 if (debug_threads)
3495 debug_printf ("dequeued one signal.\n");
3496 }
3497 else
3498 {
3499 if (debug_threads)
3500 debug_printf ("no deferred signals.\n");
3501
3502 if (stabilizing_threads)
3503 {
3504 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3505 ourstatus->value.sig = GDB_SIGNAL_0;
3506
3507 if (debug_threads)
3508 {
3509 debug_printf ("linux_wait_1 ret = %s, stopped "
3510 "while stabilizing threads\n",
3511 target_pid_to_str (ptid_of (current_thread)));
3512 debug_exit ();
3513 }
3514
3515 return ptid_of (current_thread);
3516 }
3517 }
3518 }
3519 }
3520
3521 /* Check whether GDB would be interested in this event. */
3522
3523 /* Check if GDB is interested in this syscall. */
3524 if (WIFSTOPPED (w)
3525 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3526 && !gdb_catch_this_syscall_p (event_child))
3527 {
3528 if (debug_threads)
3529 {
3530 debug_printf ("Ignored syscall for LWP %ld.\n",
3531 lwpid_of (current_thread));
3532 }
3533
3534 linux_resume_one_lwp (event_child, event_child->stepping,
3535 0, NULL);
3536 return ignore_event (ourstatus);
3537 }
3538
3539 /* If GDB is not interested in this signal, don't stop other
3540 threads, and don't report it to GDB. Just resume the inferior
3541 right away. We do this for threading-related signals as well as
3542 any that GDB specifically requested we ignore. But never ignore
3543 SIGSTOP if we sent it ourselves, and do not ignore signals when
3544 stepping - they may require special handling to skip the signal
3545 handler. Also never ignore signals that could be caused by a
3546 breakpoint. */
3547 if (WIFSTOPPED (w)
3548 && current_thread->last_resume_kind != resume_step
3549 && (
3550 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3551 (current_process ()->priv->thread_db != NULL
3552 && (WSTOPSIG (w) == __SIGRTMIN
3553 || WSTOPSIG (w) == __SIGRTMIN + 1))
3554 ||
3555 #endif
3556 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3557 && !(WSTOPSIG (w) == SIGSTOP
3558 && current_thread->last_resume_kind == resume_stop)
3559 && !linux_wstatus_maybe_breakpoint (w))))
3560 {
3561 siginfo_t info, *info_p;
3562
3563 if (debug_threads)
3564 debug_printf ("Ignored signal %d for LWP %ld.\n",
3565 WSTOPSIG (w), lwpid_of (current_thread));
3566
3567 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3568 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3569 info_p = &info;
3570 else
3571 info_p = NULL;
3572
3573 if (step_over_finished)
3574 {
3575 /* We cancelled this thread's step-over above. We still
3576 need to unsuspend all other LWPs, and set them back
3577 running again while the signal handler runs. */
3578 unsuspend_all_lwps (event_child);
3579
3580 /* Enqueue the pending signal info so that proceed_all_lwps
3581 doesn't lose it. */
3582 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3583
3584 proceed_all_lwps ();
3585 }
3586 else
3587 {
3588 linux_resume_one_lwp (event_child, event_child->stepping,
3589 WSTOPSIG (w), info_p);
3590 }
3591 return ignore_event (ourstatus);
3592 }
3593
3594 /* Note that all addresses are always "out of the step range" when
3595 there's no range to begin with. */
3596 in_step_range = lwp_in_step_range (event_child);
3597
3598 /* If GDB wanted this thread to single step, and the thread is out
3599 of the step range, we always want to report the SIGTRAP, and let
3600 GDB handle it. Watchpoints should always be reported. So should
3601 signals we can't explain. A SIGTRAP we can't explain could be a
3602 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3603 do, we'll be able to handle GDB breakpoints on top of internal
3604 breakpoints, by handling the internal breakpoint and still
3605 reporting the event to GDB. If we don't, we're out of luck, GDB
3606 won't see the breakpoint hit. If we see a single-step event but
3607 the thread should be continuing, don't pass the trap to gdb.
3608 That indicates that we had previously finished a single-step but
3609 left the single-step pending -- see
3610 complete_ongoing_step_over. */
3611 report_to_gdb = (!maybe_internal_trap
3612 || (current_thread->last_resume_kind == resume_step
3613 && !in_step_range)
3614 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3615 || (!in_step_range
3616 && !bp_explains_trap
3617 && !trace_event
3618 && !step_over_finished
3619 && !(current_thread->last_resume_kind == resume_continue
3620 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3621 || (gdb_breakpoint_here (event_child->stop_pc)
3622 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3623 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3624 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3625
3626 run_breakpoint_commands (event_child->stop_pc);
3627
3628 /* We found no reason GDB would want us to stop. We either hit one
3629 of our own breakpoints, or finished an internal step GDB
3630 shouldn't know about. */
3631 if (!report_to_gdb)
3632 {
3633 if (debug_threads)
3634 {
3635 if (bp_explains_trap)
3636 debug_printf ("Hit a gdbserver breakpoint.\n");
3637 if (step_over_finished)
3638 debug_printf ("Step-over finished.\n");
3639 if (trace_event)
3640 debug_printf ("Tracepoint event.\n");
3641 if (lwp_in_step_range (event_child))
3642 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3643 paddress (event_child->stop_pc),
3644 paddress (event_child->step_range_start),
3645 paddress (event_child->step_range_end));
3646 }
3647
3648 /* We're not reporting this breakpoint to GDB, so apply the
3649 decr_pc_after_break adjustment to the inferior's regcache
3650 ourselves. */
3651
3652 if (the_low_target.set_pc != NULL)
3653 {
3654 struct regcache *regcache
3655 = get_thread_regcache (current_thread, 1);
3656 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3657 }
3658
3659 /* We may have finished stepping over a breakpoint. If so,
3660 we've stopped and suspended all LWPs momentarily except the
3661 stepping one. This is where we resume them all again. We're
3662 going to keep waiting, so use proceed, which handles stepping
3663 over the next breakpoint. */
3664 if (debug_threads)
3665 debug_printf ("proceeding all threads.\n");
3666
3667 if (step_over_finished)
3668 unsuspend_all_lwps (event_child);
3669
3670 proceed_all_lwps ();
3671 return ignore_event (ourstatus);
3672 }
3673
3674 if (debug_threads)
3675 {
3676 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3677 {
3678 char *str;
3679
3680 str = target_waitstatus_to_string (&event_child->waitstatus);
3681 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3682 lwpid_of (get_lwp_thread (event_child)), str);
3683 xfree (str);
3684 }
3685 if (current_thread->last_resume_kind == resume_step)
3686 {
3687 if (event_child->step_range_start == event_child->step_range_end)
3688 debug_printf ("GDB wanted to single-step, reporting event.\n");
3689 else if (!lwp_in_step_range (event_child))
3690 debug_printf ("Out of step range, reporting event.\n");
3691 }
3692 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3693 debug_printf ("Stopped by watchpoint.\n");
3694 else if (gdb_breakpoint_here (event_child->stop_pc))
3695 debug_printf ("Stopped by GDB breakpoint.\n");
3696 debug_printf ("Hit a non-gdbserver trap event.\n");
3698 }
3699
3700 /* Alright, we're going to report a stop. */
3701
3702 if (!stabilizing_threads)
3703 {
3704 /* In all-stop, stop all threads. */
3705 if (!non_stop)
3706 stop_all_lwps (0, NULL);
3707
3708 /* If we're not waiting for a specific LWP, choose an event LWP
3709 from among those that have had events. Giving equal priority
3710 to all LWPs that have had events helps prevent
3711 starvation. */
3712 if (ptid_equal (ptid, minus_one_ptid))
3713 {
3714 event_child->status_pending_p = 1;
3715 event_child->status_pending = w;
3716
3717 select_event_lwp (&event_child);
3718
3719 /* current_thread and event_child must stay in sync. */
3720 current_thread = get_lwp_thread (event_child);
3721
3722 event_child->status_pending_p = 0;
3723 w = event_child->status_pending;
3724 }
3725
3726 if (step_over_finished)
3727 {
3728 if (!non_stop)
3729 {
3730 /* If we were doing a step-over, all other threads but
3731 the stepping one had been paused in start_step_over,
3732 with their suspend counts incremented. We don't want
3733 to do a full unstop/unpause, because we're in
3734 all-stop mode (so we want threads stopped), but we
3735 still need to unsuspend the other threads, to
3736 decrement their `suspended' count back. */
3737 unsuspend_all_lwps (event_child);
3738 }
3739 else
3740 {
3741 /* If we just finished a step-over, then all threads had
3742 been momentarily paused. In all-stop, that's fine,
3743 we want threads stopped by now anyway. In non-stop,
3744 we need to re-resume threads that GDB wanted to be
3745 running. */
3746 unstop_all_lwps (1, event_child);
3747 }
3748 }
3749
3750 /* Stabilize threads (move out of jump pads). */
3751 if (!non_stop)
3752 stabilize_threads ();
3753 }
3754 else
3755 {
3756 /* If we just finished a step-over, then all threads had been
3757 momentarily paused. In all-stop, that's fine, we want
3758 threads stopped by now anyway. In non-stop, we need to
3759 re-resume threads that GDB wanted to be running. */
3760 if (step_over_finished)
3761 unstop_all_lwps (1, event_child);
3762 }
3763
3764 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3765 {
3766 /* If the reported event is an exit, fork, vfork or exec, let
3767 GDB know. */
3768 *ourstatus = event_child->waitstatus;
3769 /* Clear the event lwp's waitstatus since we handled it already. */
3770 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3771 }
3772 else
3773 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3774
3775 /* Now that we've selected our final event LWP, un-adjust its PC if
3776 it was a software breakpoint, and the client doesn't know we can
3777 adjust the breakpoint ourselves. */
3778 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3779 && !swbreak_feature)
3780 {
3781 int decr_pc = the_low_target.decr_pc_after_break;
3782
3783 if (decr_pc != 0)
3784 {
3785 struct regcache *regcache
3786 = get_thread_regcache (current_thread, 1);
3787 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3788 }
3789 }
3790
3791 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3792 {
3793 get_syscall_trapinfo (event_child,
3794 &ourstatus->value.syscall_number);
3795 ourstatus->kind = event_child->syscall_state;
3796 }
3797 else if (current_thread->last_resume_kind == resume_stop
3798 && WSTOPSIG (w) == SIGSTOP)
3799 {
3800 /* The thread was requested to stop by GDB with vCont;t and it
3801 stopped cleanly, so report it as SIG0. The use of SIGSTOP is
3802 an implementation detail. */
3803 ourstatus->value.sig = GDB_SIGNAL_0;
3804 }
3805 else if (current_thread->last_resume_kind == resume_stop
3806 && WSTOPSIG (w) != SIGSTOP)
3807 {
3808 /* The thread was requested to stop by GDB with vCont;t, but it
3809 stopped for some other reason. */
3810 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3811 }
3812 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3813 {
3814 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3815 }
3816
3817 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3818
3819 if (debug_threads)
3820 {
3821 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3822 target_pid_to_str (ptid_of (current_thread)),
3823 ourstatus->kind, ourstatus->value.sig);
3824 debug_exit ();
3825 }
3826
3827 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3828 return filter_exit_event (event_child, ourstatus);
3829
3830 return ptid_of (current_thread);
3831 }
3832
3833 /* Get rid of any pending event in the pipe. */
3834 static void
3835 async_file_flush (void)
3836 {
3837 int ret;
3838 char buf;
3839
3840 do
3841 ret = read (linux_event_pipe[0], &buf, 1);
3842 while (ret >= 0 || (ret == -1 && errno == EINTR));
3843 }
3844
3845 /* Put something in the pipe, so the event loop wakes up. */
3846 static void
3847 async_file_mark (void)
3848 {
3849 int ret;
3850
3851 async_file_flush ();
3852
3853 do
3854 ret = write (linux_event_pipe[1], "+", 1);
3855 while (ret == 0 || (ret == -1 && errno == EINTR));
3856
3857 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3858 be awakened anyway. */
3859 }
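/* Illustrative sketch (not gdbserver code): the self-pipe pattern the
   two helpers above rely on.  A nonblocking pipe is registered with
   the event loop; writing one byte wakes the loop, and draining it
   resets the wakeup.  The name make_wakeup_pipe is hypothetical.  */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int wakeup_pipe[2];

/* Create the pipe and make both ends nonblocking, so that marking a
   full pipe and draining an empty one never block.  */
static int
make_wakeup_pipe (void)
{
  if (pipe (wakeup_pipe) != 0)
    return -1;
  if (fcntl (wakeup_pipe[0], F_SETFL, O_NONBLOCK) != 0
      || fcntl (wakeup_pipe[1], F_SETFL, O_NONBLOCK) != 0)
    return -1;
  return 0;
}
#endif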
3860
3861 static ptid_t
3862 linux_wait (ptid_t ptid,
3863 struct target_waitstatus *ourstatus, int target_options)
3864 {
3865 ptid_t event_ptid;
3866
3867 /* Flush the async file first. */
3868 if (target_is_async_p ())
3869 async_file_flush ();
3870
3871 do
3872 {
3873 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3874 }
3875 while ((target_options & TARGET_WNOHANG) == 0
3876 && ptid_equal (event_ptid, null_ptid)
3877 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3878
3879 /* If at least one stop was reported, there may be more. A single
3880 SIGCHLD can signal more than one child stop. */
3881 if (target_is_async_p ()
3882 && (target_options & TARGET_WNOHANG) != 0
3883 && !ptid_equal (event_ptid, null_ptid))
3884 async_file_mark ();
3885
3886 return event_ptid;
3887 }
3888
3889 /* Send a signal to an LWP. */
3890
3891 static int
3892 kill_lwp (unsigned long lwpid, int signo)
3893 {
3894 int ret;
3895
3896 errno = 0;
3897 ret = syscall (__NR_tkill, lwpid, signo);
3898 if (errno == ENOSYS)
3899 {
3900 /* If tkill fails, then we are not using nptl threads, a
3901 configuration we no longer support. */
3902 perror_with_name (("tkill"));
3903 }
3904 return ret;
3905 }
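/* Sketch (assumptions flagged, not gdbserver code): why tkill rather
   than kill.  kill(2) addresses a whole thread group and lets the
   kernel pick any eligible thread, while tkill(2)/tgkill(2) address
   one specific LWP, which is what a debugger needs.  tgkill is the
   modern variant; it also guards against LWP-id reuse.  */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Stop exactly one LWP of thread group PID.  Returns 0 on success,
   -1 with errno set otherwise.  */
static int
stop_one_lwp (pid_t pid, pid_t lwpid)
{
  return syscall (SYS_tgkill, pid, lwpid, SIGSTOP);
}
#endif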
3906
3907 void
3908 linux_stop_lwp (struct lwp_info *lwp)
3909 {
3910 send_sigstop (lwp);
3911 }
3912
3913 static void
3914 send_sigstop (struct lwp_info *lwp)
3915 {
3916 int pid;
3917
3918 pid = lwpid_of (get_lwp_thread (lwp));
3919
3920 /* If we already have a pending stop signal for this LWP, don't
3921 send another. */
3922 if (lwp->stop_expected)
3923 {
3924 if (debug_threads)
3925 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3926
3927 return;
3928 }
3929
3930 if (debug_threads)
3931 debug_printf ("Sending sigstop to lwp %d\n", pid);
3932
3933 lwp->stop_expected = 1;
3934 kill_lwp (pid, SIGSTOP);
3935 }
3936
3937 static int
3938 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3939 {
3940 struct thread_info *thread = (struct thread_info *) entry;
3941 struct lwp_info *lwp = get_thread_lwp (thread);
3942
3943 /* Ignore EXCEPT. */
3944 if (lwp == except)
3945 return 0;
3946
3947 if (lwp->stopped)
3948 return 0;
3949
3950 send_sigstop (lwp);
3951 return 0;
3952 }
3953
3954 /* Increment the suspend count of an LWP, and stop it, if not stopped
3955 yet. */
3956 static int
3957 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3958 void *except)
3959 {
3960 struct thread_info *thread = (struct thread_info *) entry;
3961 struct lwp_info *lwp = get_thread_lwp (thread);
3962
3963 /* Ignore EXCEPT. */
3964 if (lwp == except)
3965 return 0;
3966
3967 lwp_suspended_inc (lwp);
3968
3969 return send_sigstop_callback (entry, except);
3970 }
3971
3972 static void
3973 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3974 {
3975 /* Store the exit status for later. */
3976 lwp->status_pending_p = 1;
3977 lwp->status_pending = wstat;
3978
3979 /* Store in waitstatus as well, as there's nothing else to process
3980 for this event. */
3981 if (WIFEXITED (wstat))
3982 {
3983 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3984 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3985 }
3986 else if (WIFSIGNALED (wstat))
3987 {
3988 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3989 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3990 }
3991
3992 /* Prevent trying to stop it. */
3993 lwp->stopped = 1;
3994
3995 /* No further stops are expected from a dead lwp. */
3996 lwp->stop_expected = 0;
3997 }
3998
3999 /* Return true if LWP has exited already, and has a pending exit event
4000 to report to GDB. */
4001
4002 static int
4003 lwp_is_marked_dead (struct lwp_info *lwp)
4004 {
4005 return (lwp->status_pending_p
4006 && (WIFEXITED (lwp->status_pending)
4007 || WIFSIGNALED (lwp->status_pending)));
4008 }
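/* Sketch (not gdbserver code): the wait-status decoding that
   mark_lwp_dead and lwp_is_marked_dead build on.  A raw status from
   waitpid has one of three mutually exclusive shapes.  */
#if 0
#include <stdio.h>
#include <sys/wait.h>

static void
describe_wstat (int wstat)
{
  if (WIFEXITED (wstat))
    printf ("exited normally, code %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    printf ("killed by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    printf ("stopped by signal %d\n", WSTOPSIG (wstat));
}
#endif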
4009
4010 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4011
4012 static void
4013 wait_for_sigstop (void)
4014 {
4015 struct thread_info *saved_thread;
4016 ptid_t saved_tid;
4017 int wstat;
4018 int ret;
4019
4020 saved_thread = current_thread;
4021 if (saved_thread != NULL)
4022 saved_tid = saved_thread->entry.id;
4023 else
4024 saved_tid = null_ptid; /* avoid bogus unused warning */
4025
4026 if (debug_threads)
4027 debug_printf ("wait_for_sigstop: pulling events\n");
4028
4029 /* Passing NULL_PTID as filter indicates we want all events to be
4030 left pending. Eventually this returns when there are no
4031 unwaited-for children left. */
4032 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4033 &wstat, __WALL);
4034 gdb_assert (ret == -1);
4035
4036 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4037 current_thread = saved_thread;
4038 else
4039 {
4040 if (debug_threads)
4041 debug_printf ("Previously current thread died.\n");
4042
4043 /* We can't change the current inferior behind GDB's back,
4044 otherwise, a subsequent command may apply to the wrong
4045 process. */
4046 current_thread = NULL;
4047 }
4048 }
4049
4050 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
4051 move it out, because we need to report the stop event to GDB. For
4052 example, if the user puts a breakpoint in the jump pad, it's
4053 because she wants to debug it. */
4054
4055 static int
4056 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
4057 {
4058 struct thread_info *thread = (struct thread_info *) entry;
4059 struct lwp_info *lwp = get_thread_lwp (thread);
4060
4061 if (lwp->suspended != 0)
4062 {
4063 internal_error (__FILE__, __LINE__,
4064 "LWP %ld is suspended, suspended=%d\n",
4065 lwpid_of (thread), lwp->suspended);
4066 }
4067 gdb_assert (lwp->stopped);
4068
4069 /* Allow debugging the jump pad, gdb_collect, etc.. */
4070 return (supports_fast_tracepoints ()
4071 && agent_loaded_p ()
4072 && (gdb_breakpoint_here (lwp->stop_pc)
4073 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4074 || thread->last_resume_kind == resume_step)
4075 && linux_fast_tracepoint_collecting (lwp, NULL));
4076 }
4077
4078 static void
4079 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
4080 {
4081 struct thread_info *thread = (struct thread_info *) entry;
4082 struct thread_info *saved_thread;
4083 struct lwp_info *lwp = get_thread_lwp (thread);
4084 int *wstat;
4085
4086 if (lwp->suspended != 0)
4087 {
4088 internal_error (__FILE__, __LINE__,
4089 "LWP %ld is suspended, suspended=%d\n",
4090 lwpid_of (thread), lwp->suspended);
4091 }
4092 gdb_assert (lwp->stopped);
4093
4094 /* For gdb_breakpoint_here. */
4095 saved_thread = current_thread;
4096 current_thread = thread;
4097
4098 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4099
4100 /* Allow debugging the jump pad, gdb_collect, etc. */
4101 if (!gdb_breakpoint_here (lwp->stop_pc)
4102 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4103 && thread->last_resume_kind != resume_step
4104 && maybe_move_out_of_jump_pad (lwp, wstat))
4105 {
4106 if (debug_threads)
4107 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4108 lwpid_of (thread));
4109
4110 if (wstat)
4111 {
4112 lwp->status_pending_p = 0;
4113 enqueue_one_deferred_signal (lwp, wstat);
4114
4115 if (debug_threads)
4116 debug_printf ("Signal %d for LWP %ld deferred "
4117 "(in jump pad)\n",
4118 WSTOPSIG (*wstat), lwpid_of (thread));
4119 }
4120
4121 linux_resume_one_lwp (lwp, 0, 0, NULL);
4122 }
4123 else
4124 lwp_suspended_inc (lwp);
4125
4126 current_thread = saved_thread;
4127 }
4128
4129 static int
4130 lwp_running (struct inferior_list_entry *entry, void *data)
4131 {
4132 struct thread_info *thread = (struct thread_info *) entry;
4133 struct lwp_info *lwp = get_thread_lwp (thread);
4134
4135 if (lwp_is_marked_dead (lwp))
4136 return 0;
4137 if (lwp->stopped)
4138 return 0;
4139 return 1;
4140 }
4141
4142 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4143 If SUSPEND, then also increase the suspend count of every LWP,
4144 except EXCEPT. */
4145
4146 static void
4147 stop_all_lwps (int suspend, struct lwp_info *except)
4148 {
4149 /* Should not be called recursively. */
4150 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4151
4152 if (debug_threads)
4153 {
4154 debug_enter ();
4155 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4156 suspend ? "stop-and-suspend" : "stop",
4157 except != NULL
4158 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4159 : "none");
4160 }
4161
4162 stopping_threads = (suspend
4163 ? STOPPING_AND_SUSPENDING_THREADS
4164 : STOPPING_THREADS);
4165
4166 if (suspend)
4167 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4168 else
4169 find_inferior (&all_threads, send_sigstop_callback, except);
4170 wait_for_sigstop ();
4171 stopping_threads = NOT_STOPPING_THREADS;
4172
4173 if (debug_threads)
4174 {
4175 debug_printf ("stop_all_lwps done, setting stopping_threads "
4176 "back to !stopping\n");
4177 debug_exit ();
4178 }
4179 }
4180
4181 /* Enqueue one signal in the chain of signals which need to be
4182 delivered to this process on next resume. */
4183
4184 static void
4185 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4186 {
4187 struct pending_signals *p_sig = XNEW (struct pending_signals);
4188
4189 p_sig->prev = lwp->pending_signals;
4190 p_sig->signal = signal;
4191 if (info == NULL)
4192 memset (&p_sig->info, 0, sizeof (siginfo_t));
4193 else
4194 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4195 lwp->pending_signals = p_sig;
4196 }
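/* Sketch (hypothetical struct and names, not gdbserver code): the
   pending_signals chain is pushed LIFO through `prev', so the oldest
   entry sits at the tail; the delivery code in
   linux_resume_one_lwp_throw walks to the tail to get FIFO delivery.
   A minimal dequeue-oldest looks like this.  */
#if 0
#include <stdlib.h>

struct psig { struct psig *prev; int signal; };

/* Remove and return the oldest queued signal, or 0 if none.  */
static int
dequeue_oldest (struct psig **headp)
{
  int sig;

  if (*headp == NULL)
    return 0;
  while ((*headp)->prev != NULL)
    headp = &(*headp)->prev;
  sig = (*headp)->signal;
  free (*headp);
  *headp = NULL;
  return sig;
}
#endif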
4197
4198 /* Install breakpoints for software single stepping. */
4199
4200 static void
4201 install_software_single_step_breakpoints (struct lwp_info *lwp)
4202 {
4203 int i;
4204 CORE_ADDR pc;
4205 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4206 VEC (CORE_ADDR) *next_pcs = NULL;
4207 struct cleanup *old_chain = make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
4208
4209 next_pcs = (*the_low_target.get_next_pcs) (regcache);
4210
4211 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
4212 set_reinsert_breakpoint (pc, current_ptid);
4213
4214 do_cleanups (old_chain);
4215 }
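/* Sketch (hypothetical helpers, not the real gdbserver API): software
   single-step in a nutshell.  Without a hardware trap flag, the
   target decodes the current instruction, predicts every possible
   successor PC (fall-through, branch target, or both arms of a
   conditional), and plants a trap at each one.  decode_next_pcs and
   plant_trap_at are assumptions for illustration.  */
#if 0
#include <stddef.h>

typedef unsigned long core_addr;

extern size_t decode_next_pcs (core_addr pc, core_addr *out, size_t max);
extern void plant_trap_at (core_addr pc);

static void
sw_single_step (core_addr pc)
{
  core_addr next[4];  /* a conditional branch yields two successors */
  size_t i, n = decode_next_pcs (pc, next, 4);

  for (i = 0; i < n; i++)
    plant_trap_at (next[i]);
}
#endif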
4216
4217 /* Single-step via hardware or software single-step.
4218 Return 1 if hardware single-stepping, 0 if software single-stepping
4219 or unable to single-step. */
4220
4221 static int
4222 single_step (struct lwp_info* lwp)
4223 {
4224 int step = 0;
4225
4226 if (can_hardware_single_step ())
4227 {
4228 step = 1;
4229 }
4230 else if (can_software_single_step ())
4231 {
4232 install_software_single_step_breakpoints (lwp);
4233 step = 0;
4234 }
4235 else
4236 {
4237 if (debug_threads)
4238 debug_printf ("stepping is not implemented on this target");
4239 }
4240
4241 return step;
4242 }
4243
4244 /* The signal can be delivered to the inferior if we are not trying to
4245 finish a fast tracepoint collect. Since a signal can be delivered
4246 during the step-over, the program may enter the signal handler and
4247 trap again after returning from it. We can live with the spurious
4248 double traps. */
4249
4250 static int
4251 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4252 {
4253 return !lwp->collecting_fast_tracepoint;
4254 }
4255
4256 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4257 SIGNAL is nonzero, give it that signal. */
4258
4259 static void
4260 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4261 int step, int signal, siginfo_t *info)
4262 {
4263 struct thread_info *thread = get_lwp_thread (lwp);
4264 struct thread_info *saved_thread;
4265 int fast_tp_collecting;
4266 int ptrace_request;
4267 struct process_info *proc = get_thread_process (thread);
4268
4269 /* Note that the target description may not be initialised
4270 (proc->tdesc == NULL) at this point, because the program hasn't
4271 stopped at its first instruction yet. This means GDBserver skips
4272 the extra traps from the wrapper program (see option --wrapper).
4273 Code in this function that requires register access should be
4274 guarded by a check on proc->tdesc. */
4275
4276 if (lwp->stopped == 0)
4277 return;
4278
4279 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4280
4281 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4282
4283 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4284
4285 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4286 user used the "jump" command, or "set $pc = foo"). */
4287 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4288 {
4289 /* Collecting 'while-stepping' actions doesn't make sense
4290 anymore. */
4291 release_while_stepping_state_list (thread);
4292 }
4293
4294 /* If we have pending signals or status, and a new signal, enqueue the
4295 signal. Also enqueue the signal if it can't be delivered to the
4296 inferior right now. */
4297 if (signal != 0
4298 && (lwp->status_pending_p
4299 || lwp->pending_signals != NULL
4300 || !lwp_signal_can_be_delivered (lwp)))
4301 {
4302 enqueue_pending_signal (lwp, signal, info);
4303
4304 /* Postpone any pending signal. It was enqueued above. */
4305 signal = 0;
4306 }
4307
4308 if (lwp->status_pending_p)
4309 {
4310 if (debug_threads)
4311 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4312 " has pending status\n",
4313 lwpid_of (thread), step ? "step" : "continue",
4314 lwp->stop_expected ? "expected" : "not expected");
4315 return;
4316 }
4317
4318 saved_thread = current_thread;
4319 current_thread = thread;
4320
4321 /* This bit needs some thinking about. If we get a signal that
4322 we must report while a single-step reinsert is still pending,
4323 we often end up resuming the thread. It might be better to
4324 (ew) allow a stack of pending events; then we could be sure that
4325 the reinsert happened right away and not lose any signals.
4326
4327 Making this stack would also shrink the window in which breakpoints are
4328 uninserted (see comment in linux_wait_for_lwp) but not enough for
4329 complete correctness, so it won't solve that problem. It may be
4330 worthwhile just to solve this one, however. */
4331 if (lwp->bp_reinsert != 0)
4332 {
4333 if (debug_threads)
4334 debug_printf (" pending reinsert at 0x%s\n",
4335 paddress (lwp->bp_reinsert));
4336
4337 if (can_hardware_single_step ())
4338 {
4339 if (fast_tp_collecting == 0)
4340 {
4341 if (step == 0)
4342 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4343 if (lwp->suspended)
4344 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4345 lwp->suspended);
4346 }
4347 }
4348
4349 step = maybe_hw_step (thread);
4350 }
4351 else
4352 {
4353 /* If the thread isn't doing step-over, there shouldn't be any
4354 reinsert breakpoints. */
4355 gdb_assert (!has_reinsert_breakpoints (thread));
4356 }
4357
4358 if (fast_tp_collecting == 1)
4359 {
4360 if (debug_threads)
4361 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4362 " (exit-jump-pad-bkpt)\n",
4363 lwpid_of (thread));
4364 }
4365 else if (fast_tp_collecting == 2)
4366 {
4367 if (debug_threads)
4368 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4369 " single-stepping\n",
4370 lwpid_of (thread));
4371
4372 if (can_hardware_single_step ())
4373 step = 1;
4374 else
4375 {
4376 internal_error (__FILE__, __LINE__,
4377 "moving out of jump pad single-stepping"
4378 " not implemented on this target");
4379 }
4380 }
4381
4382 /* If we have while-stepping actions in this thread, set it stepping.
4383 If we have a signal to deliver, it may or may not be set to
4384 SIG_IGN, we don't know. Assume so, and allow collecting
4385 while-stepping into a signal handler. A possible smart thing to
4386 do would be to set an internal breakpoint at the signal return
4387 address, continue, and carry on catching this while-stepping
4388 action only when that breakpoint is hit. A future
4389 enhancement. */
4390 if (thread->while_stepping != NULL)
4391 {
4392 if (debug_threads)
4393 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4394 lwpid_of (thread));
4395
4396 step = single_step (lwp);
4397 }
4398
4399 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4400 {
4401 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4402
4403 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4404
4405 if (debug_threads)
4406 {
4407 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4408 (long) lwp->stop_pc);
4409 }
4410 }
4411
4412 /* If we have pending signals, consume one if it can be delivered to
4413 the inferior. */
4414 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4415 {
4416 struct pending_signals **p_sig;
4417
4418 p_sig = &lwp->pending_signals;
4419 while ((*p_sig)->prev != NULL)
4420 p_sig = &(*p_sig)->prev;
4421
4422 signal = (*p_sig)->signal;
4423 if ((*p_sig)->info.si_signo != 0)
4424 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4425 &(*p_sig)->info);
4426
4427 free (*p_sig);
4428 *p_sig = NULL;
4429 }
4430
4431 if (debug_threads)
4432 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4433 lwpid_of (thread), step ? "step" : "continue", signal,
4434 lwp->stop_expected ? "expected" : "not expected");
4435
4436 if (the_low_target.prepare_to_resume != NULL)
4437 the_low_target.prepare_to_resume (lwp);
4438
4439 regcache_invalidate_thread (thread);
4440 errno = 0;
4441 lwp->stepping = step;
4442 if (step)
4443 ptrace_request = PTRACE_SINGLESTEP;
4444 else if (gdb_catching_syscalls_p (lwp))
4445 ptrace_request = PTRACE_SYSCALL;
4446 else
4447 ptrace_request = PTRACE_CONT;
4448 ptrace (ptrace_request,
4449 lwpid_of (thread),
4450 (PTRACE_TYPE_ARG3) 0,
4451 /* Coerce to a uintptr_t first to avoid potential gcc warning
4452 of coercing an 8 byte integer to a 4 byte pointer. */
4453 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4454
4455 current_thread = saved_thread;
4456 if (errno)
4457 perror_with_name ("resuming thread");
4458
4459 /* Successfully resumed. Clear state that no longer makes sense,
4460 and mark the LWP as running. Must not do this before resuming
4461 otherwise if that fails other code will be confused. E.g., we'd
4462 later try to stop the LWP and hang forever waiting for a stop
4463 status. Note that we must not throw after this is cleared,
4464 otherwise handle_zombie_lwp_error would get confused. */
4465 lwp->stopped = 0;
4466 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4467 }
4468
4469 /* Called when we try to resume a stopped LWP and that errors out. If
4470 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4471 or about to become), discard the error, clear any pending status
4472 the LWP may have, and return true (we'll collect the exit status
4473 soon enough). Otherwise, return false. */
4474
4475 static int
4476 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4477 {
4478 struct thread_info *thread = get_lwp_thread (lp);
4479
4480 /* If we get an error after resuming the LWP successfully, we'd
4481 mistake the !T state for the LWP being gone. */
4482 gdb_assert (lp->stopped);
4483
4484 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4485 because even if ptrace failed with ESRCH, the tracee may be "not
4486 yet fully dead", but already refusing ptrace requests. In that
4487 case the tracee has 'R (Running)' state for a little bit
4488 (observed in Linux 3.18). See also the note on ESRCH in the
4489 ptrace(2) man page. Instead, check whether the LWP has any state
4490 other than ptrace-stopped. */
4491
4492 /* Don't assume anything if /proc/PID/status can't be read. */
4493 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4494 {
4495 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4496 lp->status_pending_p = 0;
4497 return 1;
4498 }
4499 return 0;
4500 }
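/* Sketch (not the real linux_proc_pid_is_trace_stopped_nowarn): what
   a trace-stop check against /proc can look like.  On modern kernels
   the State: field reads "t (tracing stop)" for a ptrace-stopped
   task.  */
#if 0
#include <stdio.h>

/* Return 1 if LWPID is in trace-stop, 0 if not, -1 if unreadable.  */
static int
is_trace_stopped (long lwpid)
{
  char path[64], line[128], state;
  int result = -1;
  FILE *f;

  snprintf (path, sizeof path, "/proc/%ld/status", lwpid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;
  while (fgets (line, sizeof line, f) != NULL)
    if (sscanf (line, "State: %c", &state) == 1)
      {
        result = (state == 't');
        break;
      }
  fclose (f);
  return result;
}
#endif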
4501
4502 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4503 disappears while we try to resume it. */
4504
4505 static void
4506 linux_resume_one_lwp (struct lwp_info *lwp,
4507 int step, int signal, siginfo_t *info)
4508 {
4509 TRY
4510 {
4511 linux_resume_one_lwp_throw (lwp, step, signal, info);
4512 }
4513 CATCH (ex, RETURN_MASK_ERROR)
4514 {
4515 if (!check_ptrace_stopped_lwp_gone (lwp))
4516 throw_exception (ex);
4517 }
4518 END_CATCH
4519 }
4520
4521 struct thread_resume_array
4522 {
4523 struct thread_resume *resume;
4524 size_t n;
4525 };
4526
4527 /* This function is called once per thread via find_inferior.
4528 ARG is a pointer to a thread_resume_array struct.
4529 We look up the thread specified by ENTRY in ARG, and mark the thread
4530 with a pointer to the appropriate resume request.
4531
4532 This algorithm is O(threads * resume elements), but the number of
4533 resume elements is small (and will remain small at least until GDB
4534 supports thread suspension). */
4535
4536 static int
4537 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4538 {
4539 struct thread_info *thread = (struct thread_info *) entry;
4540 struct lwp_info *lwp = get_thread_lwp (thread);
4541 int ndx;
4542 struct thread_resume_array *r;
4543
4544 r = (struct thread_resume_array *) arg;
4545
4546 for (ndx = 0; ndx < r->n; ndx++)
4547 {
4548 ptid_t ptid = r->resume[ndx].thread;
4549 if (ptid_equal (ptid, minus_one_ptid)
4550 || ptid_equal (ptid, entry->id)
4551 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4552 of PID'. */
4553 || (ptid_get_pid (ptid) == pid_of (thread)
4554 && (ptid_is_pid (ptid)
4555 || ptid_get_lwp (ptid) == -1)))
4556 {
4557 if (r->resume[ndx].kind == resume_stop
4558 && thread->last_resume_kind == resume_stop)
4559 {
4560 if (debug_threads)
4561 debug_printf ("already %s LWP %ld at GDB's request\n",
4562 (thread->last_status.kind
4563 == TARGET_WAITKIND_STOPPED)
4564 ? "stopped"
4565 : "stopping",
4566 lwpid_of (thread));
4567
4568 continue;
4569 }
4570
4571 lwp->resume = &r->resume[ndx];
4572 thread->last_resume_kind = lwp->resume->kind;
4573
4574 lwp->step_range_start = lwp->resume->step_range_start;
4575 lwp->step_range_end = lwp->resume->step_range_end;
4576
4577 /* If we had a deferred signal to report, dequeue one now.
4578 This can happen if LWP gets more than one signal while
4579 trying to get out of a jump pad. */
4580 if (lwp->stopped
4581 && !lwp->status_pending_p
4582 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4583 {
4584 lwp->status_pending_p = 1;
4585
4586 if (debug_threads)
4587 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4588 "leaving status pending.\n",
4589 WSTOPSIG (lwp->status_pending),
4590 lwpid_of (thread));
4591 }
4592
4593 return 0;
4594 }
4595 }
4596
4597 /* No resume action for this thread. */
4598 lwp->resume = NULL;
4599
4600 return 0;
4601 }
4602
4603 /* find_inferior callback for linux_resume.
4604 Set *FLAG_P if this lwp has an interesting status pending. */
4605
4606 static int
4607 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4608 {
4609 struct thread_info *thread = (struct thread_info *) entry;
4610 struct lwp_info *lwp = get_thread_lwp (thread);
4611
4612 /* LWPs which will not be resumed are not interesting, because
4613 we might not wait for them next time through linux_wait. */
4614 if (lwp->resume == NULL)
4615 return 0;
4616
4617 if (thread_still_has_status_pending_p (thread))
4618 * (int *) flag_p = 1;
4619
4620 return 0;
4621 }
4622
4623 /* Return 1 if this lwp that GDB wants running is stopped at an
4624 internal breakpoint that we need to step over. It assumes that any
4625 required STOP_PC adjustment has already been propagated to the
4626 inferior's regcache. */
4627
4628 static int
4629 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4630 {
4631 struct thread_info *thread = (struct thread_info *) entry;
4632 struct lwp_info *lwp = get_thread_lwp (thread);
4633 struct thread_info *saved_thread;
4634 CORE_ADDR pc;
4635 struct process_info *proc = get_thread_process (thread);
4636
4637 /* GDBserver is skipping the extra traps from the wrapper program,
4638 so we don't have to step over them. */
4639 if (proc->tdesc == NULL)
4640 return 0;
4641
4642 /* LWPs which will not be resumed are not interesting, because we
4643 might not wait for them next time through linux_wait. */
4644
4645 if (!lwp->stopped)
4646 {
4647 if (debug_threads)
4648 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4649 lwpid_of (thread));
4650 return 0;
4651 }
4652
4653 if (thread->last_resume_kind == resume_stop)
4654 {
4655 if (debug_threads)
4656 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4657 " stopped\n",
4658 lwpid_of (thread));
4659 return 0;
4660 }
4661
4662 gdb_assert (lwp->suspended >= 0);
4663
4664 if (lwp->suspended)
4665 {
4666 if (debug_threads)
4667 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4668 lwpid_of (thread));
4669 return 0;
4670 }
4671
4672 if (lwp->status_pending_p)
4673 {
4674 if (debug_threads)
4675 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4676 " status.\n",
4677 lwpid_of (thread));
4678 return 0;
4679 }
4680
4681 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4682 or we have. */
4683 pc = get_pc (lwp);
4684
4685 /* If the PC has changed since we stopped, then don't do anything,
4686 and let the breakpoint/tracepoint be hit. This happens if, for
4687 instance, GDB handled the decr_pc_after_break subtraction itself,
4688 GDB is OOL stepping this thread, or the user has issued a "jump"
4689 command, or poked thread's registers herself. */
4690 if (pc != lwp->stop_pc)
4691 {
4692 if (debug_threads)
4693 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4694 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4695 lwpid_of (thread),
4696 paddress (lwp->stop_pc), paddress (pc));
4697 return 0;
4698 }
4699
4700 /* On software single-step targets, resume the inferior with the
4701 signal rather than stepping over. */
4702 if (can_software_single_step ()
4703 && lwp->pending_signals != NULL
4704 && lwp_signal_can_be_delivered (lwp))
4705 {
4706 if (debug_threads)
4707 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4708 " signals.\n",
4709 lwpid_of (thread));
4710
4711 return 0;
4712 }
4713
4714 saved_thread = current_thread;
4715 current_thread = thread;
4716
4717 /* We can only step over breakpoints we know about. */
4718 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4719 {
4720 /* Don't step over a breakpoint that GDB expects to hit
4721 though. If the condition is being evaluated on the target's side
4722 and it evaluates to false, step over this breakpoint as well. */
4723 if (gdb_breakpoint_here (pc)
4724 && gdb_condition_true_at_breakpoint (pc)
4725 && gdb_no_commands_at_breakpoint (pc))
4726 {
4727 if (debug_threads)
4728 debug_printf ("Need step over [LWP %ld]? yes, but found"
4729 " GDB breakpoint at 0x%s; skipping step over\n",
4730 lwpid_of (thread), paddress (pc));
4731
4732 current_thread = saved_thread;
4733 return 0;
4734 }
4735 else
4736 {
4737 if (debug_threads)
4738 debug_printf ("Need step over [LWP %ld]? yes, "
4739 "found breakpoint at 0x%s\n",
4740 lwpid_of (thread), paddress (pc));
4741
4742 /* We've found an lwp that needs stepping over --- return 1 so
4743 that find_inferior stops looking. */
4744 current_thread = saved_thread;
4745
4746 return 1;
4747 }
4748 }
4749
4750 current_thread = saved_thread;
4751
4752 if (debug_threads)
4753 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4754 " at 0x%s\n",
4755 lwpid_of (thread), paddress (pc));
4756
4757 return 0;
4758 }
4759
4760 /* Start a step-over operation on LWP. When LWP is stopped at a
4761 breakpoint, to make progress we need to get the breakpoint out
4762 of the way. If we let other threads run while we do that, they may
4763 pass by the breakpoint location and miss hitting it. To avoid
4764 that, a step-over momentarily stops all threads while LWP is
4765 single-stepped by either hardware or software while the breakpoint
4766 is temporarily uninserted from the inferior. When the single-step
4767 finishes, we reinsert the breakpoint, and let all threads that are
4768 supposed to be running, run again. */
4769
4770 static int
4771 start_step_over (struct lwp_info *lwp)
4772 {
4773 struct thread_info *thread = get_lwp_thread (lwp);
4774 struct thread_info *saved_thread;
4775 CORE_ADDR pc;
4776 int step;
4777
4778 if (debug_threads)
4779 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4780 lwpid_of (thread));
4781
4782 stop_all_lwps (1, lwp);
4783
4784 if (lwp->suspended != 0)
4785 {
4786 internal_error (__FILE__, __LINE__,
4787 "LWP %ld suspended=%d\n", lwpid_of (thread),
4788 lwp->suspended);
4789 }
4790
4791 if (debug_threads)
4792 debug_printf ("Done stopping all threads for step-over.\n");
4793
4794 /* Note, we should always reach here with an already adjusted PC,
4795 either by GDB (if we're resuming due to GDB's request), or by our
4796 caller, if we just finished handling an internal breakpoint GDB
4797 shouldn't care about. */
4798 pc = get_pc (lwp);
4799
4800 saved_thread = current_thread;
4801 current_thread = thread;
4802
4803 lwp->bp_reinsert = pc;
4804 uninsert_breakpoints_at (pc);
4805 uninsert_fast_tracepoint_jumps_at (pc);
4806
4807 step = single_step (lwp);
4808
4809 current_thread = saved_thread;
4810
4811 linux_resume_one_lwp (lwp, step, 0, NULL);
4812
4813 /* Require next event from this LWP. */
4814 step_over_bkpt = thread->entry.id;
4815 return 1;
4816 }
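/* Sketch (hypothetical helpers): the full step-over lifecycle that
   start_step_over and finish_step_over implement between them.  */
#if 0
extern void stop_world (void);          /* stop_all_lwps */
extern void remove_trap (unsigned long pc);
extern void insert_trap (unsigned long pc);
extern void step_one_insn (void);       /* hw step, or sw traps at next PCs */
extern void resume_world (void);        /* unsuspend/proceed all LWPs */

static void
step_over (unsigned long bkpt_pc)
{
  stop_world ();           /* no other thread may sail past BKPT_PC */
  remove_trap (bkpt_pc);   /* expose the original instruction */
  step_one_insn ();        /* move the stepping thread past it */
  insert_trap (bkpt_pc);   /* the breakpoint is live again */
  resume_world ();         /* everyone else may run */
}
#endif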
4817
4818 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4819 start_step_over, if still there, and delete any reinsert
4820 breakpoints we've set, on non-hardware single-step targets. */
4821
4822 static int
4823 finish_step_over (struct lwp_info *lwp)
4824 {
4825 if (lwp->bp_reinsert != 0)
4826 {
4827 struct thread_info *saved_thread = current_thread;
4828
4829 if (debug_threads)
4830 debug_printf ("Finished step over.\n");
4831
4832 current_thread = get_lwp_thread (lwp);
4833
4834 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4835 may be no breakpoint to reinsert there by now. */
4836 reinsert_breakpoints_at (lwp->bp_reinsert);
4837 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4838
4839 lwp->bp_reinsert = 0;
4840
4841 /* Delete any software-single-step reinsert breakpoints. No
4842 longer needed. We don't have to worry about other threads
4843 hitting this trap, and later not being able to explain it,
4844 because we were stepping over a breakpoint, and we hold all
4845 threads but LWP stopped while doing that. */
4846 if (!can_hardware_single_step ())
4847 {
4848 gdb_assert (has_reinsert_breakpoints (current_thread));
4849 delete_reinsert_breakpoints (current_thread);
4850 }
4851
4852 step_over_bkpt = null_ptid;
4853 current_thread = saved_thread;
4854 return 1;
4855 }
4856 else
4857 return 0;
4858 }
4859
4860 /* If there's a step over in progress, wait until all threads stop
4861 (that is, until the stepping thread finishes its step), and
4862 unsuspend all lwps. The stepping thread ends with its status
4863 pending, which is processed later when we get back to processing
4864 events. */
4865
4866 static void
4867 complete_ongoing_step_over (void)
4868 {
4869 if (!ptid_equal (step_over_bkpt, null_ptid))
4870 {
4871 struct lwp_info *lwp;
4872 int wstat;
4873 int ret;
4874
4875 if (debug_threads)
4876 debug_printf ("detach: step over in progress, finish it first\n");
4877
4878 /* Passing NULL_PTID as filter indicates we want all events to
4879 be left pending. Eventually this returns when there are no
4880 unwaited-for children left. */
4881 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4882 &wstat, __WALL);
4883 gdb_assert (ret == -1);
4884
4885 lwp = find_lwp_pid (step_over_bkpt);
4886 if (lwp != NULL)
4887 finish_step_over (lwp);
4888 step_over_bkpt = null_ptid;
4889 unsuspend_all_lwps (lwp);
4890 }
4891 }
4892
4893 /* This function is called once per thread. We check the thread's resume
4894 request, which will tell us whether to resume, step, or leave the thread
4895 stopped; and what signal, if any, it should be sent.
4896
4897 For threads which we aren't explicitly told otherwise, we preserve
4898 the stepping flag; this is used for stepping over gdbserver-placed
4899 breakpoints.
4900
4901 If pending_flags was set in any thread, we queue any needed
4902 signals, since we won't actually resume. We already have a pending
4903 event to report, so we don't need to preserve any step requests;
4904 they should be re-issued if necessary. */
4905
4906 static int
4907 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4908 {
4909 struct thread_info *thread = (struct thread_info *) entry;
4910 struct lwp_info *lwp = get_thread_lwp (thread);
4911 int step;
4912 int leave_all_stopped = * (int *) arg;
4913 int leave_pending;
4914
4915 if (lwp->resume == NULL)
4916 return 0;
4917
4918 if (lwp->resume->kind == resume_stop)
4919 {
4920 if (debug_threads)
4921 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4922
4923 if (!lwp->stopped)
4924 {
4925 if (debug_threads)
4926 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4927
4928 /* Stop the thread, and wait for the event asynchronously,
4929 through the event loop. */
4930 send_sigstop (lwp);
4931 }
4932 else
4933 {
4934 if (debug_threads)
4935 debug_printf ("already stopped LWP %ld\n",
4936 lwpid_of (thread));
4937
4938 /* The LWP may have been stopped in an internal event that
4939 was not meant to be notified back to GDB (e.g., gdbserver
4940 breakpoint), so we should be reporting a stop event in
4941 this case too. */
4942
4943 /* If the thread already has a pending SIGSTOP, this is a
4944 no-op. Otherwise, something later will presumably resume
4945 the thread and this will cause it to cancel any pending
4946 operation, due to last_resume_kind == resume_stop. If
4947 the thread already has a pending status to report, we
4948 will still report it the next time we wait - see
4949 status_pending_p_callback. */
4950
4951 /* If we already have a pending signal to report, then
4952 there's no need to queue a SIGSTOP, as this means we're
4953 midway through moving the LWP out of the jump pad, and we
4954 will report the pending signal as soon as that is
4955 finished. */
4956 if (lwp->pending_signals_to_report == NULL)
4957 send_sigstop (lwp);
4958 }
4959
4960 /* For stop requests, we're done. */
4961 lwp->resume = NULL;
4962 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4963 return 0;
4964 }
4965
4966 /* If this thread which is about to be resumed has a pending status,
4967 then don't resume it - we can just report the pending status.
4968 Likewise if it is suspended, because e.g., another thread is
4969 stepping past a breakpoint. Make sure to queue any signals that
4970 would otherwise be sent. In all-stop mode, we make this decision
4971 based on whether *any* thread has a pending status. If there's a
4972 thread that needs the step-over-breakpoint dance, then don't
4973 resume any other thread but that particular one. */
4974 leave_pending = (lwp->suspended
4975 || lwp->status_pending_p
4976 || leave_all_stopped);
4977
4978 if (!leave_pending)
4979 {
4980 if (debug_threads)
4981 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4982
4983 step = (lwp->resume->kind == resume_step);
4984 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4985 }
4986 else
4987 {
4988 if (debug_threads)
4989 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4990
4991 /* If we have a new signal, enqueue the signal. */
4992 if (lwp->resume->sig != 0)
4993 {
4994 struct pending_signals *p_sig = XCNEW (struct pending_signals);
4995
4996 p_sig->prev = lwp->pending_signals;
4997 p_sig->signal = lwp->resume->sig;
4998
4999 /* If this is the same signal we were previously stopped by,
5000 make sure to queue its siginfo. We can ignore the return
5001 value of ptrace; if it fails, we'll skip
5002 PTRACE_SETSIGINFO. */
5003 if (WIFSTOPPED (lwp->last_status)
5004 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
5005 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
5006 &p_sig->info);
5007
5008 lwp->pending_signals = p_sig;
5009 }
5010 }
5011
5012 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5013 lwp->resume = NULL;
5014 return 0;
5015 }
5016
5017 static void
5018 linux_resume (struct thread_resume *resume_info, size_t n)
5019 {
5020 struct thread_resume_array array = { resume_info, n };
5021 struct thread_info *need_step_over = NULL;
5022 int any_pending;
5023 int leave_all_stopped;
5024
5025 if (debug_threads)
5026 {
5027 debug_enter ();
5028 debug_printf ("linux_resume:\n");
5029 }
5030
5031 find_inferior (&all_threads, linux_set_resume_request, &array);
5032
5033 /* If there is a thread which would otherwise be resumed, which has
5034 a pending status, then don't resume any threads - we can just
5035 report the pending status. Make sure to queue any signals that
5036 would otherwise be sent. In non-stop mode, we'll apply this
5037 logic to each thread individually. We consume all pending events
5038 before deciding whether to start a step-over (in all-stop). */
5039 any_pending = 0;
5040 if (!non_stop)
5041 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
5042
5043 /* If there is a thread which would otherwise be resumed, which is
5044 stopped at a breakpoint that needs stepping over, then don't
5045 resume any threads - have it step over the breakpoint with all
5046 other threads stopped, then resume all threads again. Make sure
5047 to queue any signals that would otherwise be delivered or
5048 queued. */
5049 if (!any_pending && supports_breakpoints ())
5050 need_step_over
5051 = (struct thread_info *) find_inferior (&all_threads,
5052 need_step_over_p, NULL);
5053
5054 leave_all_stopped = (need_step_over != NULL || any_pending);
5055
5056 if (debug_threads)
5057 {
5058 if (need_step_over != NULL)
5059 debug_printf ("Not resuming all, need step over\n");
5060 else if (any_pending)
5061 debug_printf ("Not resuming, all-stop and found "
5062 "an LWP with pending status\n");
5063 else
5064 debug_printf ("Resuming, no pending status or step over needed\n");
5065 }
5066
5067 /* Even if we're leaving threads stopped, queue all signals we'd
5068 otherwise deliver. */
5069 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
5070
5071 if (need_step_over)
5072 start_step_over (get_thread_lwp (need_step_over));
5073
5074 if (debug_threads)
5075 {
5076 debug_printf ("linux_resume done\n");
5077 debug_exit ();
5078 }
5079
5080 /* We may have events that were pending that can/should be sent to
5081 the client now. Trigger a linux_wait call. */
5082 if (target_is_async_p ())
5083 async_file_mark ();
5084 }
5085
5086 /* This function is called once per thread. We check the thread's
5087 last resume request, which will tell us whether to resume, step, or
5088 leave the thread stopped. Any signal the client requested to be
5089 delivered has already been enqueued at this point.
5090
5091 If any thread that GDB wants running is stopped at an internal
5092 breakpoint that needs stepping over, we start a step-over operation
5093 on that particular thread, and leave all others stopped. */
5094
5095 static int
5096 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5097 {
5098 struct thread_info *thread = (struct thread_info *) entry;
5099 struct lwp_info *lwp = get_thread_lwp (thread);
5100 int step;
5101
5102 if (lwp == except)
5103 return 0;
5104
5105 if (debug_threads)
5106 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5107
5108 if (!lwp->stopped)
5109 {
5110 if (debug_threads)
5111 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5112 return 0;
5113 }
5114
5115 if (thread->last_resume_kind == resume_stop
5116 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5117 {
5118 if (debug_threads)
5119 debug_printf (" client wants LWP to remain %ld stopped\n",
5120 lwpid_of (thread));
5121 return 0;
5122 }
5123
5124 if (lwp->status_pending_p)
5125 {
5126 if (debug_threads)
5127 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5128 lwpid_of (thread));
5129 return 0;
5130 }
5131
5132 gdb_assert (lwp->suspended >= 0);
5133
5134 if (lwp->suspended)
5135 {
5136 if (debug_threads)
5137 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5138 return 0;
5139 }
5140
5141 if (thread->last_resume_kind == resume_stop
5142 && lwp->pending_signals_to_report == NULL
5143 && lwp->collecting_fast_tracepoint == 0)
5144 {
5145 /* We haven't reported this LWP as stopped yet (otherwise, the
5146 last_status.kind check above would catch it, and we wouldn't
5147 reach here). This LWP may have been momentarily paused by a
5148 stop_all_lwps call while handling, for example, another LWP's
5149 step-over. In that case, the pending expected SIGSTOP signal
5150 that was queued at vCont;t handling time will have already
5151 been consumed by wait_for_sigstop, and so we need to requeue
5152 another one here. Note that if the LWP already has a SIGSTOP
5153 pending, this is a no-op. */
5154
5155 if (debug_threads)
5156 debug_printf ("Client wants LWP %ld to stop. "
5157 "Making sure it has a SIGSTOP pending\n",
5158 lwpid_of (thread));
5159
5160 send_sigstop (lwp);
5161 }
5162
5163 if (thread->last_resume_kind == resume_step)
5164 {
5165 if (debug_threads)
5166 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5167 lwpid_of (thread));
5168 step = 1;
5169 }
5170 else if (lwp->bp_reinsert != 0)
5171 {
5172 if (debug_threads)
5173 debug_printf (" stepping LWP %ld, reinsert set\n",
5174 lwpid_of (thread));
5175
5176 step = maybe_hw_step (thread);
5177 }
5178 else
5179 step = 0;
5180
5181 linux_resume_one_lwp (lwp, step, 0, NULL);
5182 return 0;
5183 }
5184
5185 static int
5186 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5187 {
5188 struct thread_info *thread = (struct thread_info *) entry;
5189 struct lwp_info *lwp = get_thread_lwp (thread);
5190
5191 if (lwp == except)
5192 return 0;
5193
5194 lwp_suspended_decr (lwp);
5195
5196 return proceed_one_lwp (entry, except);
5197 }
5198
5199 /* When we finish a step-over, set threads running again. If there's
5200 another thread that may need a step-over, now's the time to start
5201 it. Eventually, we'll move all threads past their breakpoints. */
5202
5203 static void
5204 proceed_all_lwps (void)
5205 {
5206 struct thread_info *need_step_over;
5207
5208 /* If there is a thread which would otherwise be resumed, which is
5209 stopped at a breakpoint that needs stepping over, then don't
5210 resume any threads - have it step over the breakpoint with all
5211 other threads stopped, then resume all threads again. */
5212
5213 if (supports_breakpoints ())
5214 {
5215 need_step_over
5216 = (struct thread_info *) find_inferior (&all_threads,
5217 need_step_over_p, NULL);
5218
5219 if (need_step_over != NULL)
5220 {
5221 if (debug_threads)
5222 debug_printf ("proceed_all_lwps: found "
5223 "thread %ld needing a step-over\n",
5224 lwpid_of (need_step_over));
5225
5226 start_step_over (get_thread_lwp (need_step_over));
5227 return;
5228 }
5229 }
5230
5231 if (debug_threads)
5232 debug_printf ("Proceeding, no step-over needed\n");
5233
5234 find_inferior (&all_threads, proceed_one_lwp, NULL);
5235 }
5236
5237 /* Stopped LWPs that the client wanted to be running, that don't have
5238 pending statuses, are set to run again, except for EXCEPT, if not
5239 NULL. This undoes a stop_all_lwps call. */
5240
5241 static void
5242 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5243 {
5244 if (debug_threads)
5245 {
5246 debug_enter ();
5247 if (except)
5248 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5249 lwpid_of (get_lwp_thread (except)));
5250 else
5251 debug_printf ("unstopping all lwps\n");
5252 }
5253
5254 if (unsuspend)
5255 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5256 else
5257 find_inferior (&all_threads, proceed_one_lwp, except);
5258
5259 if (debug_threads)
5260 {
5261 debug_printf ("unstop_all_lwps done\n");
5262 debug_exit ();
5263 }
5264 }
5265
5266
5267 #ifdef HAVE_LINUX_REGSETS
5268
5269 #define use_linux_regsets 1
5270
5271 /* Returns true if REGSET has been disabled. */
5272
5273 static int
5274 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5275 {
5276 return (info->disabled_regsets != NULL
5277 && info->disabled_regsets[regset - info->regsets]);
5278 }
5279
5280 /* Disable REGSET. */
5281
5282 static void
5283 disable_regset (struct regsets_info *info, struct regset_info *regset)
5284 {
5285 int dr_offset;
5286
5287 dr_offset = regset - info->regsets;
5288 if (info->disabled_regsets == NULL)
5289 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5290 info->disabled_regsets[dr_offset] = 1;
5291 }
5292
5293 static int
5294 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5295 struct regcache *regcache)
5296 {
5297 struct regset_info *regset;
5298 int saw_general_regs = 0;
5299 int pid;
5300 struct iovec iov;
5301
5302 pid = lwpid_of (current_thread);
5303 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5304 {
5305 void *buf, *data;
5306 int nt_type, res;
5307
5308 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5309 continue;
5310
5311 buf = xmalloc (regset->size);
5312
5313 nt_type = regset->nt_type;
5314 if (nt_type)
5315 {
5316 iov.iov_base = buf;
5317 iov.iov_len = regset->size;
5318 data = (void *) &iov;
5319 }
5320 else
5321 data = buf;
5322
5323 #ifndef __sparc__
5324 res = ptrace (regset->get_request, pid,
5325 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5326 #else
5327 res = ptrace (regset->get_request, pid, data, nt_type);
5328 #endif
5329 if (res < 0)
5330 {
5331 if (errno == EIO)
5332 {
5333 /* If we get EIO on a regset, do not try it again for
5334 this process mode. */
5335 disable_regset (regsets_info, regset);
5336 }
5337 else if (errno == ENODATA)
5338 {
5339 /* ENODATA may be returned if the regset is currently
5340 not "active". This can happen in normal operation,
5341 so suppress the warning in this case. */
5342 }
5343 else
5344 {
5345 char s[256];
5346 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5347 pid);
5348 perror (s);
5349 }
5350 }
5351 else
5352 {
5353 if (regset->type == GENERAL_REGS)
5354 saw_general_regs = 1;
5355 regset->store_function (regcache, buf);
5356 }
5357 free (buf);
5358 }
5359 if (saw_general_regs)
5360 return 0;
5361 else
5362 return 1;
5363 }
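/* Sketch (not gdbserver code): the iovec-based regset read used above
   whenever a regset has an NT_* type.  PTRACE_GETREGSET fills IOV and
   updates iov_len to the number of bytes the kernel wrote.  */
#if 0
#include <elf.h>            /* NT_PRSTATUS */
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>       /* struct user_regs_struct */

static long
read_gp_regs (int tid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  return ptrace (PTRACE_GETREGSET, tid, (void *) NT_PRSTATUS, &iov);
}
#endif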
5364
5365 static int
5366 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5367 struct regcache *regcache)
5368 {
5369 struct regset_info *regset;
5370 int saw_general_regs = 0;
5371 int pid;
5372 struct iovec iov;
5373
5374 pid = lwpid_of (current_thread);
5375 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5376 {
5377 void *buf, *data;
5378 int nt_type, res;
5379
5380 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5381 || regset->fill_function == NULL)
5382 continue;
5383
5384 buf = xmalloc (regset->size);
5385
5386 /* First fill the buffer with the current register set contents,
5387 in case there are any items in the kernel's regset that are
5388 not in gdbserver's regcache. */
5389
5390 nt_type = regset->nt_type;
5391 if (nt_type)
5392 {
5393 iov.iov_base = buf;
5394 iov.iov_len = regset->size;
5395 data = (void *) &iov;
5396 }
5397 else
5398 data = buf;
5399
5400 #ifndef __sparc__
5401 res = ptrace (regset->get_request, pid,
5402 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5403 #else
5404 res = ptrace (regset->get_request, pid, data, nt_type);
5405 #endif
5406
5407 if (res == 0)
5408 {
5409 /* Then overlay our cached registers on that. */
5410 regset->fill_function (regcache, buf);
5411
5412 /* Only now do we write the register set. */
5413 #ifndef __sparc__
5414 res = ptrace (regset->set_request, pid,
5415 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5416 #else
5417 res = ptrace (regset->set_request, pid, data, nt_type);
5418 #endif
5419 }
5420
5421 if (res < 0)
5422 {
5423 if (errno == EIO)
5424 {
5425 /* If we get EIO on a regset, do not try it again for
5426 this process mode. */
5427 disable_regset (regsets_info, regset);
5428 }
5429 else if (errno == ESRCH)
5430 {
5431 /* At this point, ESRCH should mean the process is
5432 already gone, in which case we simply ignore attempts
5433 to change its registers. See also the related
5434 comment in linux_resume_one_lwp. */
5435 free (buf);
5436 return 0;
5437 }
5438 else
5439 {
5440 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5441 }
5442 }
5443 else if (regset->type == GENERAL_REGS)
5444 saw_general_regs = 1;
5445 free (buf);
5446 }
5447 if (saw_general_regs)
5448 return 0;
5449 else
5450 return 1;
5451 }
5452
5453 #else /* !HAVE_LINUX_REGSETS */
5454
5455 #define use_linux_regsets 0
5456 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5457 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5458
5459 #endif
5460
5461 /* Return 1 if register REGNO is supported by one of the regset ptrace
5462 calls or 0 if it has to be transferred individually. */
5463
5464 static int
5465 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5466 {
5467 unsigned char mask = 1 << (regno % 8);
5468 size_t index = regno / 8;
5469
5470 return (use_linux_regsets
5471 && (regs_info->regset_bitmap == NULL
5472 || (regs_info->regset_bitmap[index] & mask) != 0));
5473 }
5474
5475 #ifdef HAVE_LINUX_USRREGS
5476
5477 static int
5478 register_addr (const struct usrregs_info *usrregs, int regnum)
5479 {
5480 int addr;
5481
5482 if (regnum < 0 || regnum >= usrregs->num_regs)
5483 error ("Invalid register number %d.", regnum);
5484
5485 addr = usrregs->regmap[regnum];
5486
5487 return addr;
5488 }
5489
5490 /* Fetch one register. */
5491 static void
5492 fetch_register (const struct usrregs_info *usrregs,
5493 struct regcache *regcache, int regno)
5494 {
5495 CORE_ADDR regaddr;
5496 int i, size;
5497 char *buf;
5498 int pid;
5499
5500 if (regno >= usrregs->num_regs)
5501 return;
5502 if ((*the_low_target.cannot_fetch_register) (regno))
5503 return;
5504
5505 regaddr = register_addr (usrregs, regno);
5506 if (regaddr == -1)
5507 return;
5508
5509 size = ((register_size (regcache->tdesc, regno)
5510 + sizeof (PTRACE_XFER_TYPE) - 1)
5511 & -sizeof (PTRACE_XFER_TYPE));
5512 buf = (char *) alloca (size);
5513
5514 pid = lwpid_of (current_thread);
5515 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5516 {
5517 errno = 0;
5518 *(PTRACE_XFER_TYPE *) (buf + i) =
5519 ptrace (PTRACE_PEEKUSER, pid,
5520 /* Coerce to a uintptr_t first to avoid potential gcc warning
5521 about coercing an 8 byte integer to a 4 byte pointer. */
5522 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5523 regaddr += sizeof (PTRACE_XFER_TYPE);
5524 if (errno != 0)
5525 error ("reading register %d: %s", regno, strerror (errno));
5526 }
5527
5528 if (the_low_target.supply_ptrace_register)
5529 the_low_target.supply_ptrace_register (regcache, regno, buf);
5530 else
5531 supply_register (regcache, regno, buf);
5532 }
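/* An illustration of the size rounding above (hypothetical register):
   with an 8-byte PTRACE_XFER_TYPE, a 10-byte register rounds up to
   16 bytes, so the loop issues two PTRACE_PEEKUSER transfers to fill
   BUF before it is supplied to the regcache.  */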
5533
5534 /* Store one register. */
5535 static void
5536 store_register (const struct usrregs_info *usrregs,
5537 struct regcache *regcache, int regno)
5538 {
5539 CORE_ADDR regaddr;
5540 int i, size;
5541 char *buf;
5542 int pid;
5543
5544 if (regno >= usrregs->num_regs)
5545 return;
5546 if ((*the_low_target.cannot_store_register) (regno))
5547 return;
5548
5549 regaddr = register_addr (usrregs, regno);
5550 if (regaddr == -1)
5551 return;
5552
5553 size = ((register_size (regcache->tdesc, regno)
5554 + sizeof (PTRACE_XFER_TYPE) - 1)
5555 & -sizeof (PTRACE_XFER_TYPE));
5556 buf = (char *) alloca (size);
5557 memset (buf, 0, size);
5558
5559 if (the_low_target.collect_ptrace_register)
5560 the_low_target.collect_ptrace_register (regcache, regno, buf);
5561 else
5562 collect_register (regcache, regno, buf);
5563
5564 pid = lwpid_of (current_thread);
5565 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5566 {
5567 errno = 0;
5568 ptrace (PTRACE_POKEUSER, pid,
5569 /* Coerce to a uintptr_t first to avoid potential gcc warning
5570 about coercing an 8 byte integer to a 4 byte pointer. */
5571 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5572 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5573 if (errno != 0)
5574 {
5575 /* At this point, ESRCH should mean the process is
5576 already gone, in which case we simply ignore attempts
5577 to change its registers. See also the related
5578 comment in linux_resume_one_lwp. */
5579 if (errno == ESRCH)
5580 return;
5581
5582 if ((*the_low_target.cannot_store_register) (regno) == 0)
5583 error ("writing register %d: %s", regno, strerror (errno));
5584 }
5585 regaddr += sizeof (PTRACE_XFER_TYPE);
5586 }
5587 }
5588
5589 /* Fetch all registers, or just one, from the child process.
5590 If REGNO is -1, do this for all registers, skipping any that are
5591 assumed to have been retrieved by regsets_fetch_inferior_registers,
5592 unless ALL is non-zero.
5593 Otherwise, REGNO specifies which register (so we can save time). */
5594 static void
5595 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5596 struct regcache *regcache, int regno, int all)
5597 {
5598 struct usrregs_info *usr = regs_info->usrregs;
5599
5600 if (regno == -1)
5601 {
5602 for (regno = 0; regno < usr->num_regs; regno++)
5603 if (all || !linux_register_in_regsets (regs_info, regno))
5604 fetch_register (usr, regcache, regno);
5605 }
5606 else
5607 fetch_register (usr, regcache, regno);
5608 }
5609
5610 /* Store our register values back into the inferior.
5611 If REGNO is -1, do this for all registers, skipping any that are
5612 assumed to have been saved by regsets_store_inferior_registers,
5613 unless ALL is non-zero.
5614 Otherwise, REGNO specifies which register (so we can save time). */
5615 static void
5616 usr_store_inferior_registers (const struct regs_info *regs_info,
5617 struct regcache *regcache, int regno, int all)
5618 {
5619 struct usrregs_info *usr = regs_info->usrregs;
5620
5621 if (regno == -1)
5622 {
5623 for (regno = 0; regno < usr->num_regs; regno++)
5624 if (all || !linux_register_in_regsets (regs_info, regno))
5625 store_register (usr, regcache, regno);
5626 }
5627 else
5628 store_register (usr, regcache, regno);
5629 }
5630
5631 #else /* !HAVE_LINUX_USRREGS */
5632
5633 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5634 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5635
5636 #endif
5637
5638
5639 static void
5640 linux_fetch_registers (struct regcache *regcache, int regno)
5641 {
5642 int use_regsets;
5643 int all = 0;
5644 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5645
5646 if (regno == -1)
5647 {
5648 if (the_low_target.fetch_register != NULL
5649 && regs_info->usrregs != NULL)
5650 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5651 (*the_low_target.fetch_register) (regcache, regno);
5652
5653 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5654 if (regs_info->usrregs != NULL)
5655 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5656 }
5657 else
5658 {
5659 if (the_low_target.fetch_register != NULL
5660 && (*the_low_target.fetch_register) (regcache, regno))
5661 return;
5662
5663 use_regsets = linux_register_in_regsets (regs_info, regno);
5664 if (use_regsets)
5665 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5666 regcache);
5667 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5668 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5669 }
5670 }
5671
5672 static void
5673 linux_store_registers (struct regcache *regcache, int regno)
5674 {
5675 int use_regsets;
5676 int all = 0;
5677 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5678
5679 if (regno == -1)
5680 {
5681 all = regsets_store_inferior_registers (regs_info->regsets_info,
5682 regcache);
5683 if (regs_info->usrregs != NULL)
5684 usr_store_inferior_registers (regs_info, regcache, regno, all);
5685 }
5686 else
5687 {
5688 use_regsets = linux_register_in_regsets (regs_info, regno);
5689 if (use_regsets)
5690 all = regsets_store_inferior_registers (regs_info->regsets_info,
5691 regcache);
5692 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5693 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5694 }
5695 }
5696
5697
5698 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5699 to debugger memory starting at MYADDR. */
5700
5701 static int
5702 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5703 {
5704 int pid = lwpid_of (current_thread);
5705 register PTRACE_XFER_TYPE *buffer;
5706 register CORE_ADDR addr;
5707 register int count;
5708 char filename[64];
5709 register int i;
5710 int ret;
5711 int fd;
5712
5713 /* Try using /proc. Don't bother for one word. */
5714 if (len >= 3 * sizeof (long))
5715 {
5716 int bytes;
5717
5718 /* We could keep this file open and cache it - possibly one per
5719 thread. That requires some juggling, but is even faster. */
5720 sprintf (filename, "/proc/%d/mem", pid);
5721 fd = open (filename, O_RDONLY | O_LARGEFILE);
5722 if (fd == -1)
5723 goto no_proc;
5724
5725 /* If pread64 is available, use it. It's faster if the kernel
5726 supports it (only one syscall), and it's 64-bit safe even on
5727 32-bit platforms (for instance, SPARC debugging a SPARC64
5728 application). */
5729 #ifdef HAVE_PREAD64
5730 bytes = pread64 (fd, myaddr, len, memaddr);
5731 #else
5732 bytes = -1;
5733 if (lseek (fd, memaddr, SEEK_SET) != -1)
5734 bytes = read (fd, myaddr, len);
5735 #endif
5736
5737 close (fd);
5738 if (bytes == len)
5739 return 0;
5740
5741 /* Some data was read; we'll try to get the rest with ptrace. */
5742 if (bytes > 0)
5743 {
5744 memaddr += bytes;
5745 myaddr += bytes;
5746 len -= bytes;
5747 }
5748 }
5749
5750 no_proc:
5751 /* Round starting address down to longword boundary. */
5752 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5753 /* Round ending address up; get number of longwords that makes. */
5754 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5755 / sizeof (PTRACE_XFER_TYPE));
5756 /* Allocate buffer of that many longwords. */
5757 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5758
5759 /* Read all the longwords. */
5760 errno = 0;
5761 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5762 {
5763 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5764 about coercing an 8 byte integer to a 4 byte pointer. */
5765 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5766 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5767 (PTRACE_TYPE_ARG4) 0);
5768 if (errno)
5769 break;
5770 }
5771 ret = errno;
5772
5773 /* Copy appropriate bytes out of the buffer. */
5774 if (i > 0)
5775 {
5776 i *= sizeof (PTRACE_XFER_TYPE);
5777 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5778 memcpy (myaddr,
5779 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5780 i < len ? i : len);
5781 }
5782
5783 return ret;
5784 }
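/* A worked example of the alignment arithmetic above (illustrative):
   with sizeof (PTRACE_XFER_TYPE) == 8, a request with MEMADDR == 0x1003
   and LEN == 6 rounds ADDR down to 0x1000 and yields COUNT == 2; after
   both words are peeked, the memcpy skips the three leading pad bytes
   and copies just the six requested bytes.  */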
5785
5786 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5787 memory at MEMADDR. On failure (cannot write to the inferior)
5788 returns the value of errno. Always succeeds if LEN is zero. */
5789
5790 static int
5791 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5792 {
5793 register int i;
5794 /* Round starting address down to longword boundary. */
5795 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5796 /* Round ending address up; get number of longwords that makes. */
5797 register int count
5798 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5799 / sizeof (PTRACE_XFER_TYPE);
5800
5801 /* Allocate buffer of that many longwords. */
5802 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5803
5804 int pid = lwpid_of (current_thread);
5805
5806 if (len == 0)
5807 {
5808 /* Zero length write always succeeds. */
5809 return 0;
5810 }
5811
5812 if (debug_threads)
5813 {
5814 /* Dump up to four bytes. */
5815 char str[4 * 2 + 1];
5816 char *p = str;
5817 int dump = len < 4 ? len : 4;
5818
5819 for (i = 0; i < dump; i++)
5820 {
5821 sprintf (p, "%02x", myaddr[i]);
5822 p += 2;
5823 }
5824 *p = '\0';
5825
5826 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5827 str, (long) memaddr, pid);
5828 }
5829
5830 /* Fill start and end extra bytes of buffer with existing memory data. */
5831
5832 errno = 0;
5833 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5834 about coercing an 8 byte integer to a 4 byte pointer. */
5835 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5836 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5837 (PTRACE_TYPE_ARG4) 0);
5838 if (errno)
5839 return errno;
5840
5841 if (count > 1)
5842 {
5843 errno = 0;
5844 buffer[count - 1]
5845 = ptrace (PTRACE_PEEKTEXT, pid,
5846 /* Coerce to a uintptr_t first to avoid potential gcc warning
5847 about coercing an 8 byte integer to a 4 byte pointer. */
5848 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5849 * sizeof (PTRACE_XFER_TYPE)),
5850 (PTRACE_TYPE_ARG4) 0);
5851 if (errno)
5852 return errno;
5853 }
5854
5855 /* Copy data to be written over corresponding part of buffer. */
5856
5857 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5858 myaddr, len);
5859
5860 /* Write the entire buffer. */
5861
5862 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5863 {
5864 errno = 0;
5865 ptrace (PTRACE_POKETEXT, pid,
5866 /* Coerce to a uintptr_t first to avoid potential gcc warning
5867 about coercing an 8 byte integer to a 4 byte pointer. */
5868 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5869 (PTRACE_TYPE_ARG4) buffer[i]);
5870 if (errno)
5871 return errno;
5872 }
5873
5874 return 0;
5875 }
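/* Writes use the same read-modify-write scheme (illustrative): writing
   2 bytes at 0x1007 with 8-byte words spans two words, so buffer[0]
   and buffer[count - 1] are pre-filled via PTRACE_PEEKTEXT, the new
   bytes are overlaid at offset 7, and the whole buffer is poked back
   with PTRACE_POKETEXT.  */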
5876
5877 static void
5878 linux_look_up_symbols (void)
5879 {
5880 #ifdef USE_THREAD_DB
5881 struct process_info *proc = current_process ();
5882
5883 if (proc->priv->thread_db != NULL)
5884 return;
5885
5886 thread_db_init ();
5887 #endif
5888 }
5889
5890 static void
5891 linux_request_interrupt (void)
5892 {
5893 extern unsigned long signal_pid;
5894
5895 /* Send a SIGINT to the process group. This acts just as if the user
5896 had typed a ^C on the controlling terminal. */
5897 kill (-signal_pid, SIGINT);
5898 }
5899
5900 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5901 to debugger memory starting at MYADDR. */
5902
5903 static int
5904 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5905 {
5906 char filename[PATH_MAX];
5907 int fd, n;
5908 int pid = lwpid_of (current_thread);
5909
5910 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5911
5912 fd = open (filename, O_RDONLY);
5913 if (fd < 0)
5914 return -1;
5915
5916 if (offset != (CORE_ADDR) 0
5917 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5918 n = -1;
5919 else
5920 n = read (fd, myaddr, len);
5921
5922 close (fd);
5923
5924 return n;
5925 }
5926
5927 /* These breakpoint and watchpoint related wrapper functions simply
5928 pass on the function call if the target has registered a
5929 corresponding function. */
5930
5931 static int
5932 linux_supports_z_point_type (char z_type)
5933 {
5934 return (the_low_target.supports_z_point_type != NULL
5935 && the_low_target.supports_z_point_type (z_type));
5936 }
5937
5938 static int
5939 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5940 int size, struct raw_breakpoint *bp)
5941 {
5942 if (type == raw_bkpt_type_sw)
5943 return insert_memory_breakpoint (bp);
5944 else if (the_low_target.insert_point != NULL)
5945 return the_low_target.insert_point (type, addr, size, bp);
5946 else
5947 /* Unsupported (see target.h). */
5948 return 1;
5949 }
5950
5951 static int
5952 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5953 int size, struct raw_breakpoint *bp)
5954 {
5955 if (type == raw_bkpt_type_sw)
5956 return remove_memory_breakpoint (bp);
5957 else if (the_low_target.remove_point != NULL)
5958 return the_low_target.remove_point (type, addr, size, bp);
5959 else
5960 /* Unsupported (see target.h). */
5961 return 1;
5962 }
5963
5964 /* Implement the to_stopped_by_sw_breakpoint target_ops
5965 method. */
5966
5967 static int
5968 linux_stopped_by_sw_breakpoint (void)
5969 {
5970 struct lwp_info *lwp = get_thread_lwp (current_thread);
5971
5972 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5973 }
5974
5975 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5976 method. */
5977
5978 static int
5979 linux_supports_stopped_by_sw_breakpoint (void)
5980 {
5981 return USE_SIGTRAP_SIGINFO;
5982 }
5983
5984 /* Implement the to_stopped_by_hw_breakpoint target_ops
5985 method. */
5986
5987 static int
5988 linux_stopped_by_hw_breakpoint (void)
5989 {
5990 struct lwp_info *lwp = get_thread_lwp (current_thread);
5991
5992 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5993 }
5994
5995 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5996 method. */
5997
5998 static int
5999 linux_supports_stopped_by_hw_breakpoint (void)
6000 {
6001 return USE_SIGTRAP_SIGINFO;
6002 }
6003
6004 /* Implement the supports_hardware_single_step target_ops method. */
6005
6006 static int
6007 linux_supports_hardware_single_step (void)
6008 {
6009 return can_hardware_single_step ();
6010 }
6011
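/* Implement the supports_software_single_step target_ops method. */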
6012 static int
6013 linux_supports_software_single_step (void)
6014 {
6015 return can_software_single_step ();
6016 }
6017
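/* Implement the to_stopped_by_watchpoint target_ops method. */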
6018 static int
6019 linux_stopped_by_watchpoint (void)
6020 {
6021 struct lwp_info *lwp = get_thread_lwp (current_thread);
6022
6023 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6024 }
6025
6026 static CORE_ADDR
6027 linux_stopped_data_address (void)
6028 {
6029 struct lwp_info *lwp = get_thread_lwp (current_thread);
6030
6031 return lwp->stopped_data_address;
6032 }
6033
6034 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6035 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6036 && defined(PT_TEXT_END_ADDR)
6037
6038 /* This is only used for targets that define PT_TEXT_ADDR,
6039 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
6040 target presumably has other ways of acquiring this information,
6041 such as loadmaps. */
6042
6043 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6044 to tell gdb about. */
6045
6046 static int
6047 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6048 {
6049 unsigned long text, text_end, data;
6050 int pid = lwpid_of (current_thread);
6051
6052 errno = 0;
6053
6054 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6055 (PTRACE_TYPE_ARG4) 0);
6056 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6057 (PTRACE_TYPE_ARG4) 0);
6058 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6059 (PTRACE_TYPE_ARG4) 0);
6060
6061 if (errno == 0)
6062 {
6063 /* Both text and data offsets produced at compile-time (and so
6064 used by gdb) are relative to the beginning of the program,
6065 with the data segment immediately following the text segment.
6066 However, the actual runtime layout in memory may put the data
6067 somewhere else, so when we send gdb a data base-address, we
6068 use the real data base address and subtract the compile-time
6069 data base-address from it (which is just the length of the
6070 text segment). BSS immediately follows data in both
6071 cases. */
6072 *text_p = text;
6073 *data_p = data - (text_end - text);
6074
6075 return 1;
6076 }
6077 return 0;
6078 }
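/* A worked example of the offset computation above (hypothetical
   values): if ptrace reports text == 0x8000, text_end == 0xa000 and
   data == 0x10000, then *text_p == 0x8000 and
   *data_p == 0x10000 - (0xa000 - 0x8000) == 0xe000, the base GDB
   should add to compile-time data addresses.  */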
6079 #endif
6080
6081 static int
6082 linux_qxfer_osdata (const char *annex,
6083 unsigned char *readbuf, unsigned const char *writebuf,
6084 CORE_ADDR offset, int len)
6085 {
6086 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6087 }
6088
6089 /* Convert a native/host siginfo object, into/from the siginfo in the
6090 layout of the inferiors' architecture. */
6091
6092 static void
6093 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6094 {
6095 int done = 0;
6096
6097 if (the_low_target.siginfo_fixup != NULL)
6098 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6099
6100 /* If there was no callback, or the callback didn't do anything,
6101 then just do a straight memcpy. */
6102 if (!done)
6103 {
6104 if (direction == 1)
6105 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6106 else
6107 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6108 }
6109 }
6110
6111 static int
6112 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6113 unsigned const char *writebuf, CORE_ADDR offset, int len)
6114 {
6115 int pid;
6116 siginfo_t siginfo;
6117 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6118
6119 if (current_thread == NULL)
6120 return -1;
6121
6122 pid = lwpid_of (current_thread);
6123
6124 if (debug_threads)
6125 debug_printf ("%s siginfo for lwp %d.\n",
6126 readbuf != NULL ? "Reading" : "Writing",
6127 pid);
6128
6129 if (offset >= sizeof (siginfo))
6130 return -1;
6131
6132 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6133 return -1;
6134
6135 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6136 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6137 inferior with a 64-bit GDBSERVER should look the same as debugging it
6138 with a 32-bit GDBSERVER, we need to convert it. */
6139 siginfo_fixup (&siginfo, inf_siginfo, 0);
6140
6141 if (offset + len > sizeof (siginfo))
6142 len = sizeof (siginfo) - offset;
6143
6144 if (readbuf != NULL)
6145 memcpy (readbuf, inf_siginfo + offset, len);
6146 else
6147 {
6148 memcpy (inf_siginfo + offset, writebuf, len);
6149
6150 /* Convert back to ptrace layout before flushing it out. */
6151 siginfo_fixup (&siginfo, inf_siginfo, 1);
6152
6153 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6154 return -1;
6155 }
6156
6157 return len;
6158 }
6159
6160 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6161 it lets us notice when children change state; otherwise, it is the
6162 handler for the sigsuspend in my_waitpid. */
6163
6164 static void
6165 sigchld_handler (int signo)
6166 {
6167 int old_errno = errno;
6168
6169 if (debug_threads)
6170 {
6171 do
6172 {
6173 /* fprintf is not async-signal-safe, so call write
6174 directly. */
6175 if (write (2, "sigchld_handler\n",
6176 sizeof ("sigchld_handler\n") - 1) < 0)
6177 break; /* just ignore */
6178 } while (0);
6179 }
6180
6181 if (target_is_async_p ())
6182 async_file_mark (); /* trigger a linux_wait */
6183
6184 errno = old_errno;
6185 }
6186
6187 static int
6188 linux_supports_non_stop (void)
6189 {
6190 return 1;
6191 }
6192
6193 static int
6194 linux_async (int enable)
6195 {
6196 int previous = target_is_async_p ();
6197
6198 if (debug_threads)
6199 debug_printf ("linux_async (%d), previous=%d\n",
6200 enable, previous);
6201
6202 if (previous != enable)
6203 {
6204 sigset_t mask;
6205 sigemptyset (&mask);
6206 sigaddset (&mask, SIGCHLD);
6207
6208 sigprocmask (SIG_BLOCK, &mask, NULL);
6209
6210 if (enable)
6211 {
6212 if (pipe (linux_event_pipe) == -1)
6213 {
6214 linux_event_pipe[0] = -1;
6215 linux_event_pipe[1] = -1;
6216 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6217
6218 warning ("creating event pipe failed.");
6219 return previous;
6220 }
6221
6222 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6223 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6224
6225 /* Register the event loop handler. */
6226 add_file_handler (linux_event_pipe[0],
6227 handle_target_event, NULL);
6228
6229 /* Always trigger a linux_wait. */
6230 async_file_mark ();
6231 }
6232 else
6233 {
6234 delete_file_handler (linux_event_pipe[0]);
6235
6236 close (linux_event_pipe[0]);
6237 close (linux_event_pipe[1]);
6238 linux_event_pipe[0] = -1;
6239 linux_event_pipe[1] = -1;
6240 }
6241
6242 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6243 }
6244
6245 return previous;
6246 }
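/* The pipe managed above implements the usual self-pipe trick:
   sigchld_handler calls async_file_mark, which writes a byte into
   linux_event_pipe[1]; the event loop then sees linux_event_pipe[0]
   become readable and invokes handle_target_event, which eventually
   reaches linux_wait.  Both ends are non-blocking so that a full
   pipe cannot wedge the signal handler.  */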
6247
6248 static int
6249 linux_start_non_stop (int nonstop)
6250 {
6251 /* Register or unregister from event-loop accordingly. */
6252 linux_async (nonstop);
6253
6254 if (target_is_async_p () != (nonstop != 0))
6255 return -1;
6256
6257 return 0;
6258 }
6259
6260 static int
6261 linux_supports_multi_process (void)
6262 {
6263 return 1;
6264 }
6265
6266 /* Check if fork events are supported. */
6267
6268 static int
6269 linux_supports_fork_events (void)
6270 {
6271 return linux_supports_tracefork ();
6272 }
6273
6274 /* Check if vfork events are supported. */
6275
6276 static int
6277 linux_supports_vfork_events (void)
6278 {
6279 return linux_supports_tracefork ();
6280 }
6281
6282 /* Check if exec events are supported. */
6283
6284 static int
6285 linux_supports_exec_events (void)
6286 {
6287 return linux_supports_traceexec ();
6288 }
6289
6290 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6291 options for the specified lwp. */
6292
6293 static int
6294 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6295 void *args)
6296 {
6297 struct thread_info *thread = (struct thread_info *) entry;
6298 struct lwp_info *lwp = get_thread_lwp (thread);
6299
6300 if (!lwp->stopped)
6301 {
6302 /* Stop the lwp so we can modify its ptrace options. */
6303 lwp->must_set_ptrace_flags = 1;
6304 linux_stop_lwp (lwp);
6305 }
6306 else
6307 {
6308 /* Already stopped; go ahead and set the ptrace options. */
6309 struct process_info *proc = find_process_pid (pid_of (thread));
6310 int options = linux_low_ptrace_options (proc->attached);
6311
6312 linux_enable_event_reporting (lwpid_of (thread), options);
6313 lwp->must_set_ptrace_flags = 0;
6314 }
6315
6316 return 0;
6317 }
6318
6319 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6320 ptrace flags for all inferiors. This is in case the new GDB connection
6321 doesn't support the same set of events that the previous one did. */
6322
6323 static void
6324 linux_handle_new_gdb_connection (void)
6325 {
6326 pid_t pid;
6327
6328 /* Request that all the lwps reset their ptrace options. */
6329 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6330 }
6331
6332 static int
6333 linux_supports_disable_randomization (void)
6334 {
6335 #ifdef HAVE_PERSONALITY
6336 return 1;
6337 #else
6338 return 0;
6339 #endif
6340 }
6341
6342 static int
6343 linux_supports_agent (void)
6344 {
6345 return 1;
6346 }
6347
6348 static int
6349 linux_supports_range_stepping (void)
6350 {
6351 if (*the_low_target.supports_range_stepping == NULL)
6352 return 0;
6353
6354 return (*the_low_target.supports_range_stepping) ();
6355 }
6356
6357 /* Enumerate spufs IDs for process PID. */
6358 static int
6359 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6360 {
6361 int pos = 0;
6362 int written = 0;
6363 char path[128];
6364 DIR *dir;
6365 struct dirent *entry;
6366
6367 sprintf (path, "/proc/%ld/fd", pid);
6368 dir = opendir (path);
6369 if (!dir)
6370 return -1;
6371
6372 rewinddir (dir);
6373 while ((entry = readdir (dir)) != NULL)
6374 {
6375 struct stat st;
6376 struct statfs stfs;
6377 int fd;
6378
6379 fd = atoi (entry->d_name);
6380 if (!fd)
6381 continue;
6382
6383 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6384 if (stat (path, &st) != 0)
6385 continue;
6386 if (!S_ISDIR (st.st_mode))
6387 continue;
6388
6389 if (statfs (path, &stfs) != 0)
6390 continue;
6391 if (stfs.f_type != SPUFS_MAGIC)
6392 continue;
6393
6394 if (pos >= offset && pos + 4 <= offset + len)
6395 {
6396 *(unsigned int *)(buf + pos - offset) = fd;
6397 written += 4;
6398 }
6399 pos += 4;
6400 }
6401
6402 closedir (dir);
6403 return written;
6404 }
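/* Each SPU context ID occupies four bytes of the result, so, for
   instance, a request with OFFSET == 4 and LEN == 8 returns the
   second and third IDs encountered during the readdir traversal
   (illustrative values).  */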
6405
6406 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6407 object type, using the /proc file system. */
6408 static int
6409 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6410 unsigned const char *writebuf,
6411 CORE_ADDR offset, int len)
6412 {
6413 long pid = lwpid_of (current_thread);
6414 char buf[128];
6415 int fd = 0;
6416 int ret = 0;
6417
6418 if (!writebuf && !readbuf)
6419 return -1;
6420
6421 if (!*annex)
6422 {
6423 if (!readbuf)
6424 return -1;
6425 else
6426 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6427 }
6428
6429 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6430 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6431 if (fd <= 0)
6432 return -1;
6433
6434 if (offset != 0
6435 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6436 {
6437 close (fd);
6438 return 0;
6439 }
6440
6441 if (writebuf)
6442 ret = write (fd, writebuf, (size_t) len);
6443 else
6444 ret = read (fd, readbuf, (size_t) len);
6445
6446 close (fd);
6447 return ret;
6448 }
6449
6450 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6451 struct target_loadseg
6452 {
6453 /* Core address to which the segment is mapped. */
6454 Elf32_Addr addr;
6455 /* VMA recorded in the program header. */
6456 Elf32_Addr p_vaddr;
6457 /* Size of this segment in memory. */
6458 Elf32_Word p_memsz;
6459 };
6460
6461 # if defined PT_GETDSBT
6462 struct target_loadmap
6463 {
6464 /* Protocol version number, must be zero. */
6465 Elf32_Word version;
6466 /* Pointer to the DSBT table, its size, and the DSBT index. */
6467 unsigned *dsbt_table;
6468 unsigned dsbt_size, dsbt_index;
6469 /* Number of segments in this map. */
6470 Elf32_Word nsegs;
6471 /* The actual memory map. */
6472 struct target_loadseg segs[/*nsegs*/];
6473 };
6474 # define LINUX_LOADMAP PT_GETDSBT
6475 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6476 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6477 # else
6478 struct target_loadmap
6479 {
6480 /* Protocol version number, must be zero. */
6481 Elf32_Half version;
6482 /* Number of segments in this map. */
6483 Elf32_Half nsegs;
6484 /* The actual memory map. */
6485 struct target_loadseg segs[/*nsegs*/];
6486 };
6487 # define LINUX_LOADMAP PTRACE_GETFDPIC
6488 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6489 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6490 # endif
6491
6492 static int
6493 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6494 unsigned char *myaddr, unsigned int len)
6495 {
6496 int pid = lwpid_of (current_thread);
6497 int addr = -1;
6498 struct target_loadmap *data = NULL;
6499 unsigned int actual_length, copy_length;
6500
6501 if (strcmp (annex, "exec") == 0)
6502 addr = (int) LINUX_LOADMAP_EXEC;
6503 else if (strcmp (annex, "interp") == 0)
6504 addr = (int) LINUX_LOADMAP_INTERP;
6505 else
6506 return -1;
6507
6508 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6509 return -1;
6510
6511 if (data == NULL)
6512 return -1;
6513
6514 actual_length = sizeof (struct target_loadmap)
6515 + sizeof (struct target_loadseg) * data->nsegs;
6516
6517 if (offset < 0 || offset > actual_length)
6518 return -1;
6519
6520 copy_length = actual_length - offset < len ? actual_length - offset : len;
6521 memcpy (myaddr, (char *) data + offset, copy_length);
6522 return copy_length;
6523 }
6524 #else
6525 # define linux_read_loadmap NULL
6526 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6527
6528 static void
6529 linux_process_qsupported (char **features, int count)
6530 {
6531 if (the_low_target.process_qsupported != NULL)
6532 the_low_target.process_qsupported (features, count);
6533 }
6534
6535 static int
6536 linux_supports_catch_syscall (void)
6537 {
6538 return (the_low_target.get_syscall_trapinfo != NULL
6539 && linux_supports_tracesysgood ());
6540 }
6541
6542 static int
6543 linux_get_ipa_tdesc_idx (void)
6544 {
6545 if (the_low_target.get_ipa_tdesc_idx == NULL)
6546 return 0;
6547
6548 return (*the_low_target.get_ipa_tdesc_idx) ();
6549 }
6550
6551 static int
6552 linux_supports_tracepoints (void)
6553 {
6554 if (*the_low_target.supports_tracepoints == NULL)
6555 return 0;
6556
6557 return (*the_low_target.supports_tracepoints) ();
6558 }
6559
6560 static CORE_ADDR
6561 linux_read_pc (struct regcache *regcache)
6562 {
6563 if (the_low_target.get_pc == NULL)
6564 return 0;
6565
6566 return (*the_low_target.get_pc) (regcache);
6567 }
6568
6569 static void
6570 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6571 {
6572 gdb_assert (the_low_target.set_pc != NULL);
6573
6574 (*the_low_target.set_pc) (regcache, pc);
6575 }
6576
6577 static int
6578 linux_thread_stopped (struct thread_info *thread)
6579 {
6580 return get_thread_lwp (thread)->stopped;
6581 }
6582
6583 /* This exposes stop-all-threads functionality to other modules. */
6584
6585 static void
6586 linux_pause_all (int freeze)
6587 {
6588 stop_all_lwps (freeze, NULL);
6589 }
6590
6591 /* This exposes unstop-all-threads functionality to other gdbserver
6592 modules. */
6593
6594 static void
6595 linux_unpause_all (int unfreeze)
6596 {
6597 unstop_all_lwps (unfreeze, NULL);
6598 }
6599
6600 static int
6601 linux_prepare_to_access_memory (void)
6602 {
6603 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6604 running LWP. */
6605 if (non_stop)
6606 linux_pause_all (1);
6607 return 0;
6608 }
6609
6610 static void
6611 linux_done_accessing_memory (void)
6612 {
6613 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6614 running LWP. */
6615 if (non_stop)
6616 linux_unpause_all (1);
6617 }
6618
6619 static int
6620 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6621 CORE_ADDR collector,
6622 CORE_ADDR lockaddr,
6623 ULONGEST orig_size,
6624 CORE_ADDR *jump_entry,
6625 CORE_ADDR *trampoline,
6626 ULONGEST *trampoline_size,
6627 unsigned char *jjump_pad_insn,
6628 ULONGEST *jjump_pad_insn_size,
6629 CORE_ADDR *adjusted_insn_addr,
6630 CORE_ADDR *adjusted_insn_addr_end,
6631 char *err)
6632 {
6633 return (*the_low_target.install_fast_tracepoint_jump_pad)
6634 (tpoint, tpaddr, collector, lockaddr, orig_size,
6635 jump_entry, trampoline, trampoline_size,
6636 jjump_pad_insn, jjump_pad_insn_size,
6637 adjusted_insn_addr, adjusted_insn_addr_end,
6638 err);
6639 }
6640
6641 static struct emit_ops *
6642 linux_emit_ops (void)
6643 {
6644 if (the_low_target.emit_ops != NULL)
6645 return (*the_low_target.emit_ops) ();
6646 else
6647 return NULL;
6648 }
6649
6650 static int
6651 linux_get_min_fast_tracepoint_insn_len (void)
6652 {
6653 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6654 }
6655
6656 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6657
6658 static int
6659 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6660 CORE_ADDR *phdr_memaddr, int *num_phdr)
6661 {
6662 char filename[PATH_MAX];
6663 int fd;
6664 const int auxv_size = is_elf64
6665 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6666 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6667
6668 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6669
6670 fd = open (filename, O_RDONLY);
6671 if (fd < 0)
6672 return 1;
6673
6674 *phdr_memaddr = 0;
6675 *num_phdr = 0;
6676 while (read (fd, buf, auxv_size) == auxv_size
6677 && (*phdr_memaddr == 0 || *num_phdr == 0))
6678 {
6679 if (is_elf64)
6680 {
6681 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6682
6683 switch (aux->a_type)
6684 {
6685 case AT_PHDR:
6686 *phdr_memaddr = aux->a_un.a_val;
6687 break;
6688 case AT_PHNUM:
6689 *num_phdr = aux->a_un.a_val;
6690 break;
6691 }
6692 }
6693 else
6694 {
6695 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6696
6697 switch (aux->a_type)
6698 {
6699 case AT_PHDR:
6700 *phdr_memaddr = aux->a_un.a_val;
6701 break;
6702 case AT_PHNUM:
6703 *num_phdr = aux->a_un.a_val;
6704 break;
6705 }
6706 }
6707 }
6708
6709 close (fd);
6710
6711 if (*phdr_memaddr == 0 || *num_phdr == 0)
6712 {
6713 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6714 "phdr_memaddr = %ld, phdr_num = %d",
6715 (long) *phdr_memaddr, *num_phdr);
6716 return 2;
6717 }
6718
6719 return 0;
6720 }
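/* The auxv read above is a flat array of (a_type, a_un) records
   terminated by an AT_NULL entry; the loop stops early once both
   AT_PHDR and AT_PHNUM have been seen, or at end-of-file.  */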
6721
6722 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6723
6724 static CORE_ADDR
6725 get_dynamic (const int pid, const int is_elf64)
6726 {
6727 CORE_ADDR phdr_memaddr, relocation;
6728 int num_phdr, i;
6729 unsigned char *phdr_buf;
6730 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6731
6732 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6733 return 0;
6734
6735 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6736 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6737
6738 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6739 return 0;
6740
6741 /* Compute relocation: it is expected to be 0 for "regular" executables,
6742 non-zero for PIE ones. */
6743 relocation = -1;
6744 for (i = 0; relocation == -1 && i < num_phdr; i++)
6745 if (is_elf64)
6746 {
6747 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6748
6749 if (p->p_type == PT_PHDR)
6750 relocation = phdr_memaddr - p->p_vaddr;
6751 }
6752 else
6753 {
6754 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6755
6756 if (p->p_type == PT_PHDR)
6757 relocation = phdr_memaddr - p->p_vaddr;
6758 }
6759
6760 if (relocation == -1)
6761 {
6762 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6763 real-world executables, including PIE executables, always have
6764 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6765 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6766 provides DT_DEBUG anyway (fpc binaries are statically linked).
6767
6768 Therefore, if DT_DEBUG exists, PT_PHDR is always present as well.
6769
6770 GDB could also derive RELOCATION from AT_ENTRY - e_entry. */
6771
6772 return 0;
6773 }
6774
6775 for (i = 0; i < num_phdr; i++)
6776 {
6777 if (is_elf64)
6778 {
6779 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6780
6781 if (p->p_type == PT_DYNAMIC)
6782 return p->p_vaddr + relocation;
6783 }
6784 else
6785 {
6786 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6787
6788 if (p->p_type == PT_DYNAMIC)
6789 return p->p_vaddr + relocation;
6790 }
6791 }
6792
6793 return 0;
6794 }
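/* A worked relocation example (hypothetical addresses): if PT_PHDR
   records p_vaddr == 0x40 but AT_PHDR says the table actually lives
   at 0x555555554040, then relocation == 0x555555554000, and a
   PT_DYNAMIC p_vaddr of 0x2d90 resolves to
   &_DYNAMIC == 0x555555556d90.  */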
6795
6796 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6797 can be 0 if the inferior does not yet have the library list initialized.
6798 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6799 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6800
6801 static CORE_ADDR
6802 get_r_debug (const int pid, const int is_elf64)
6803 {
6804 CORE_ADDR dynamic_memaddr;
6805 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6806 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6807 CORE_ADDR map = -1;
6808
6809 dynamic_memaddr = get_dynamic (pid, is_elf64);
6810 if (dynamic_memaddr == 0)
6811 return map;
6812
6813 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6814 {
6815 if (is_elf64)
6816 {
6817 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6818 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6819 union
6820 {
6821 Elf64_Xword map;
6822 unsigned char buf[sizeof (Elf64_Xword)];
6823 }
6824 rld_map;
6825 #endif
6826 #ifdef DT_MIPS_RLD_MAP
6827 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6828 {
6829 if (linux_read_memory (dyn->d_un.d_val,
6830 rld_map.buf, sizeof (rld_map.buf)) == 0)
6831 return rld_map.map;
6832 else
6833 break;
6834 }
6835 #endif /* DT_MIPS_RLD_MAP */
6836 #ifdef DT_MIPS_RLD_MAP_REL
6837 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6838 {
6839 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6840 rld_map.buf, sizeof (rld_map.buf)) == 0)
6841 return rld_map.map;
6842 else
6843 break;
6844 }
6845 #endif /* DT_MIPS_RLD_MAP_REL */
6846
6847 if (dyn->d_tag == DT_DEBUG && map == -1)
6848 map = dyn->d_un.d_val;
6849
6850 if (dyn->d_tag == DT_NULL)
6851 break;
6852 }
6853 else
6854 {
6855 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6856 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6857 union
6858 {
6859 Elf32_Word map;
6860 unsigned char buf[sizeof (Elf32_Word)];
6861 }
6862 rld_map;
6863 #endif
6864 #ifdef DT_MIPS_RLD_MAP
6865 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6866 {
6867 if (linux_read_memory (dyn->d_un.d_val,
6868 rld_map.buf, sizeof (rld_map.buf)) == 0)
6869 return rld_map.map;
6870 else
6871 break;
6872 }
6873 #endif /* DT_MIPS_RLD_MAP */
6874 #ifdef DT_MIPS_RLD_MAP_REL
6875 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6876 {
6877 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6878 rld_map.buf, sizeof (rld_map.buf)) == 0)
6879 return rld_map.map;
6880 else
6881 break;
6882 }
6883 #endif /* DT_MIPS_RLD_MAP_REL */
6884
6885 if (dyn->d_tag == DT_DEBUG && map == -1)
6886 map = dyn->d_un.d_val;
6887
6888 if (dyn->d_tag == DT_NULL)
6889 break;
6890 }
6891
6892 dynamic_memaddr += dyn_size;
6893 }
6894
6895 return map;
6896 }
6897
6898 /* Read one pointer from MEMADDR in the inferior. */
6899
6900 static int
6901 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6902 {
6903 int ret;
6904
6905 /* Go through a union so this works on either big or little endian
6906 hosts, when the inferior's pointer size is smaller than the size
6907 of CORE_ADDR. It is assumed the inferior's endianness is the
6908 same as the superior's. */
6909 union
6910 {
6911 CORE_ADDR core_addr;
6912 unsigned int ui;
6913 unsigned char uc;
6914 } addr;
6915
6916 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6917 if (ret == 0)
6918 {
6919 if (ptr_size == sizeof (CORE_ADDR))
6920 *ptr = addr.core_addr;
6921 else if (ptr_size == sizeof (unsigned int))
6922 *ptr = addr.ui;
6923 else
6924 gdb_assert_not_reached ("unhandled pointer size");
6925 }
6926 return ret;
6927 }
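/* For instance (illustrative), a 32-bit inferior under a 64-bit
   gdbserver has PTR_SIZE == 4, so only the first four bytes of the
   union are filled and the result is taken from addr.ui; going
   through the union keeps this correct on big-endian hosts, where
   the bytes would otherwise land in the wrong half of a CORE_ADDR.  */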
6928
6929 struct link_map_offsets
6930 {
6931 /* Offset and size of r_debug.r_version. */
6932 int r_version_offset;
6933
6934 /* Offset and size of r_debug.r_map. */
6935 int r_map_offset;
6936
6937 /* Offset to l_addr field in struct link_map. */
6938 int l_addr_offset;
6939
6940 /* Offset to l_name field in struct link_map. */
6941 int l_name_offset;
6942
6943 /* Offset to l_ld field in struct link_map. */
6944 int l_ld_offset;
6945
6946 /* Offset to l_next field in struct link_map. */
6947 int l_next_offset;
6948
6949 /* Offset to l_prev field in struct link_map. */
6950 int l_prev_offset;
6951 };
6952
6953 /* Construct qXfer:libraries-svr4:read reply. */
6954
6955 static int
6956 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6957 unsigned const char *writebuf,
6958 CORE_ADDR offset, int len)
6959 {
6960 char *document;
6961 unsigned document_len;
6962 struct process_info_private *const priv = current_process ()->priv;
6963 char filename[PATH_MAX];
6964 int pid, is_elf64;
6965
6966 static const struct link_map_offsets lmo_32bit_offsets =
6967 {
6968 0, /* r_version offset. */
6969 4, /* r_debug.r_map offset. */
6970 0, /* l_addr offset in link_map. */
6971 4, /* l_name offset in link_map. */
6972 8, /* l_ld offset in link_map. */
6973 12, /* l_next offset in link_map. */
6974 16 /* l_prev offset in link_map. */
6975 };
6976
6977 static const struct link_map_offsets lmo_64bit_offsets =
6978 {
6979 0, /* r_version offset. */
6980 8, /* r_debug.r_map offset. */
6981 0, /* l_addr offset in link_map. */
6982 8, /* l_name offset in link_map. */
6983 16, /* l_ld offset in link_map. */
6984 24, /* l_next offset in link_map. */
6985 32 /* l_prev offset in link_map. */
6986 };
6987 const struct link_map_offsets *lmo;
6988 unsigned int machine;
6989 int ptr_size;
6990 CORE_ADDR lm_addr = 0, lm_prev = 0;
6991 int allocated = 1024;
6992 char *p;
6993 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6994 int header_done = 0;
6995
6996 if (writebuf != NULL)
6997 return -2;
6998 if (readbuf == NULL)
6999 return -1;
7000
7001 pid = lwpid_of (current_thread);
7002 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7003 is_elf64 = elf_64_file_p (filename, &machine);
7004 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7005 ptr_size = is_elf64 ? 8 : 4;
7006
7007 while (annex[0] != '\0')
7008 {
7009 const char *sep;
7010 CORE_ADDR *addrp;
7011 int len;
7012
7013 sep = strchr (annex, '=');
7014 if (sep == NULL)
7015 break;
7016
7017 len = sep - annex;
7018 if (len == 5 && startswith (annex, "start"))
7019 addrp = &lm_addr;
7020 else if (len == 4 && startswith (annex, "prev"))
7021 addrp = &lm_prev;
7022 else
7023 {
7024 annex = strchr (sep, ';');
7025 if (annex == NULL)
7026 break;
7027 annex++;
7028 continue;
7029 }
7030
7031 annex = decode_address_to_semicolon (addrp, sep + 1);
7032 }
7033
7034 if (lm_addr == 0)
7035 {
7036 int r_version = 0;
7037
7038 if (priv->r_debug == 0)
7039 priv->r_debug = get_r_debug (pid, is_elf64);
7040
7041 /* We failed to find DT_DEBUG. Such a situation will not change
7042 for this inferior, so do not retry it. Report it to GDB as
7043 E01; see GDB's solib-svr4.c for the reasons. */
7044 if (priv->r_debug == (CORE_ADDR) -1)
7045 return -1;
7046
7047 if (priv->r_debug != 0)
7048 {
7049 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7050 (unsigned char *) &r_version,
7051 sizeof (r_version)) != 0
7052 || r_version != 1)
7053 {
7054 warning ("unexpected r_debug version %d", r_version);
7055 }
7056 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7057 &lm_addr, ptr_size) != 0)
7058 {
7059 warning ("unable to read r_map from 0x%lx",
7060 (long) priv->r_debug + lmo->r_map_offset);
7061 }
7062 }
7063 }
7064
7065 document = (char *) xmalloc (allocated);
7066 strcpy (document, "<library-list-svr4 version=\"1.0\"");
7067 p = document + strlen (document);
7068
7069 while (lm_addr
7070 && read_one_ptr (lm_addr + lmo->l_name_offset,
7071 &l_name, ptr_size) == 0
7072 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7073 &l_addr, ptr_size) == 0
7074 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7075 &l_ld, ptr_size) == 0
7076 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7077 &l_prev, ptr_size) == 0
7078 && read_one_ptr (lm_addr + lmo->l_next_offset,
7079 &l_next, ptr_size) == 0)
7080 {
7081 unsigned char libname[PATH_MAX];
7082
7083 if (lm_prev != l_prev)
7084 {
7085 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7086 (long) lm_prev, (long) l_prev);
7087 break;
7088 }
7089
7090 /* Ignore the first entry even if it has a valid name, as the first
7091 entry corresponds to the main executable. The first entry should
7092 not be skipped if the dynamic loader was loaded late by a static
7093 executable (see the solib-svr4.c parameter ignore_first). But in
7094 that case the main executable does not have PT_DYNAMIC present, and
7095 this function has already exited above due to a failed get_r_debug. */
7096 if (lm_prev == 0)
7097 {
7098 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7099 p = p + strlen (p);
7100 }
7101 else
7102 {
7103 /* Not checking for error because reading may stop before
7104 we've got PATH_MAX worth of characters. */
7105 libname[0] = '\0';
7106 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7107 libname[sizeof (libname) - 1] = '\0';
7108 if (libname[0] != '\0')
7109 {
7110 /* 6x the size for xml_escape_text below. */
7111 size_t len = 6 * strlen ((char *) libname);
7112 char *name;
7113
7114 if (!header_done)
7115 {
7116 /* Terminate `<library-list-svr4'. */
7117 *p++ = '>';
7118 header_done = 1;
7119 }
7120
7121 while (allocated < p - document + len + 200)
7122 {
7123 /* Expand to guarantee sufficient storage. */
7124 uintptr_t document_len = p - document;
7125
7126 document = (char *) xrealloc (document, 2 * allocated);
7127 allocated *= 2;
7128 p = document + document_len;
7129 }
7130
7131 name = xml_escape_text ((char *) libname);
7132 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7133 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7134 name, (unsigned long) lm_addr,
7135 (unsigned long) l_addr, (unsigned long) l_ld);
7136 free (name);
7137 }
7138 }
7139
7140 lm_prev = lm_addr;
7141 lm_addr = l_next;
7142 }
7143
7144 if (!header_done)
7145 {
7146 /* Empty list; terminate `<library-list-svr4'. */
7147 strcpy (p, "/>");
7148 }
7149 else
7150 strcpy (p, "</library-list-svr4>");
7151
7152 document_len = strlen (document);
7153 if (offset < document_len)
7154 document_len -= offset;
7155 else
7156 document_len = 0;
7157 if (len > document_len)
7158 len = document_len;
7159
7160 memcpy (readbuf, document + offset, len);
7161 xfree (document);
7162
7163 return len;
7164 }
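/* The document built above looks like this (addresses illustrative):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
   <library name="/lib64/libc.so.6" lm="0x7ffff7ffe750"
   l_addr="0x7ffff7a00000" l_ld="0x7ffff7dcdb80"/>
   </library-list-svr4>  */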
7165
7166 #ifdef HAVE_LINUX_BTRACE
7167
7168 /* See to_disable_btrace target method. */
7169
7170 static int
7171 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7172 {
7173 enum btrace_error err;
7174
7175 err = linux_disable_btrace (tinfo);
7176 return (err == BTRACE_ERR_NONE ? 0 : -1);
7177 }
7178
7179 /* Encode an Intel Processor Trace configuration. */
7180
7181 static void
7182 linux_low_encode_pt_config (struct buffer *buffer,
7183 const struct btrace_data_pt_config *config)
7184 {
7185 buffer_grow_str (buffer, "<pt-config>\n");
7186
7187 switch (config->cpu.vendor)
7188 {
7189 case CV_INTEL:
7190 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7191 "model=\"%u\" stepping=\"%u\"/>\n",
7192 config->cpu.family, config->cpu.model,
7193 config->cpu.stepping);
7194 break;
7195
7196 default:
7197 break;
7198 }
7199
7200 buffer_grow_str (buffer, "</pt-config>\n");
7201 }
7202
7203 /* Encode a raw buffer. */
7204
7205 static void
7206 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7207 unsigned int size)
7208 {
7209 if (size == 0)
7210 return;
7211
7212 /* We use hex encoding - see common/rsp-low.h. */
7213 buffer_grow_str (buffer, "<raw>\n");
7214
7215 while (size-- > 0)
7216 {
7217 char elem[2];
7218
7219 elem[0] = tohex ((*data >> 4) & 0xf);
7220 elem[1] = tohex (*data++ & 0xf);
7221
7222 buffer_grow (buffer, elem, 2);
7223 }
7224
7225 buffer_grow_str (buffer, "</raw>\n");
7226 }
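/* For example, the two bytes { 0x1a, 0x2b } are emitted as the four
   characters "1a2b" between the <raw> tags.  */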
7227
7228 /* See to_read_btrace target method. */
7229
7230 static int
7231 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7232 enum btrace_read_type type)
7233 {
7234 struct btrace_data btrace;
7235 struct btrace_block *block;
7236 enum btrace_error err;
7237 int i;
7238
7239 btrace_data_init (&btrace);
7240
7241 err = linux_read_btrace (&btrace, tinfo, type);
7242 if (err != BTRACE_ERR_NONE)
7243 {
7244 if (err == BTRACE_ERR_OVERFLOW)
7245 buffer_grow_str0 (buffer, "E.Overflow.");
7246 else
7247 buffer_grow_str0 (buffer, "E.Generic Error.");
7248
7249 goto err;
7250 }
7251
7252 switch (btrace.format)
7253 {
7254 case BTRACE_FORMAT_NONE:
7255 buffer_grow_str0 (buffer, "E.No Trace.");
7256 goto err;
7257
7258 case BTRACE_FORMAT_BTS:
7259 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7260 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7261
7262 for (i = 0;
7263 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7264 i++)
7265 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7266 paddress (block->begin), paddress (block->end));
7267
7268 buffer_grow_str0 (buffer, "</btrace>\n");
7269 break;
7270
7271 case BTRACE_FORMAT_PT:
7272 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7273 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7274 buffer_grow_str (buffer, "<pt>\n");
7275
7276 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7277
7278 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7279 btrace.variant.pt.size);
7280
7281 buffer_grow_str (buffer, "</pt>\n");
7282 buffer_grow_str0 (buffer, "</btrace>\n");
7283 break;
7284
7285 default:
7286 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7287 goto err;
7288 }
7289
7290 btrace_data_fini (&btrace);
7291 return 0;
7292
7293 err:
7294 btrace_data_fini (&btrace);
7295 return -1;
7296 }
7297
7298 /* See to_btrace_conf target method. */
7299
7300 static int
7301 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7302 struct buffer *buffer)
7303 {
7304 const struct btrace_config *conf;
7305
7306 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7307 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7308
7309 conf = linux_btrace_conf (tinfo);
7310 if (conf != NULL)
7311 {
7312 switch (conf->format)
7313 {
7314 case BTRACE_FORMAT_NONE:
7315 break;
7316
7317 case BTRACE_FORMAT_BTS:
7318 buffer_xml_printf (buffer, "<bts");
7319 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7320 buffer_xml_printf (buffer, " />\n");
7321 break;
7322
7323 case BTRACE_FORMAT_PT:
7324 buffer_xml_printf (buffer, "<pt");
7325 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7326 buffer_xml_printf (buffer, "/>\n");
7327 break;
7328 }
7329 }
7330
7331 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7332 return 0;
7333 }
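/* A BTS configuration, for example, is reported as (size value
   illustrative):

   <!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
   <btrace-conf version="1.0">
   <bts size="0x10000" />
   </btrace-conf>  */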
7334 #endif /* HAVE_LINUX_BTRACE */
7335
7336 /* See nat/linux-nat.h. */
7337
7338 ptid_t
7339 current_lwp_ptid (void)
7340 {
7341 return ptid_of (current_thread);
7342 }
7343
7344 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7345
7346 static int
7347 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7348 {
7349 if (the_low_target.breakpoint_kind_from_pc != NULL)
7350 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7351 else
7352 return default_breakpoint_kind_from_pc (pcptr);
7353 }
7354
7355 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7356
7357 static const gdb_byte *
7358 linux_sw_breakpoint_from_kind (int kind, int *size)
7359 {
7360 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7361
7362 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7363 }
7364
7365 /* Implementation of the target_ops method
7366 "breakpoint_kind_from_current_state". */
7367
7368 static int
7369 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7370 {
7371 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7372 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7373 else
7374 return linux_breakpoint_kind_from_pc (pcptr);
7375 }
7376
7377 /* Default implementation of linux_target_ops method "set_pc" for
7378 32-bit pc register which is literally named "pc". */
7379
7380 void
7381 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7382 {
7383 uint32_t newpc = pc;
7384
7385 supply_register_by_name (regcache, "pc", &newpc);
7386 }
7387
7388 /* Default implementation of linux_target_ops method "get_pc" for
7389 32-bit pc register which is literally named "pc". */
7390
7391 CORE_ADDR
7392 linux_get_pc_32bit (struct regcache *regcache)
7393 {
7394 uint32_t pc;
7395
7396 collect_register_by_name (regcache, "pc", &pc);
7397 if (debug_threads)
7398 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7399 return pc;
7400 }
7401
7402 /* Default implementation of linux_target_ops method "set_pc" for
7403 64-bit pc register which is literally named "pc". */
7404
7405 void
7406 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7407 {
7408 uint64_t newpc = pc;
7409
7410 supply_register_by_name (regcache, "pc", &newpc);
7411 }
7412
7413 /* Default implementation of linux_target_ops method "get_pc" for
7414 64-bit pc register which is literally named "pc". */
7415
7416 CORE_ADDR
7417 linux_get_pc_64bit (struct regcache *regcache)
7418 {
7419 uint64_t pc;
7420
7421 collect_register_by_name (regcache, "pc", &pc);
7422 if (debug_threads)
7423 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7424 return pc;
7425 }
7426
7427
7428 static struct target_ops linux_target_ops = {
7429 linux_create_inferior,
7430 linux_post_create_inferior,
7431 linux_attach,
7432 linux_kill,
7433 linux_detach,
7434 linux_mourn,
7435 linux_join,
7436 linux_thread_alive,
7437 linux_resume,
7438 linux_wait,
7439 linux_fetch_registers,
7440 linux_store_registers,
7441 linux_prepare_to_access_memory,
7442 linux_done_accessing_memory,
7443 linux_read_memory,
7444 linux_write_memory,
7445 linux_look_up_symbols,
7446 linux_request_interrupt,
7447 linux_read_auxv,
7448 linux_supports_z_point_type,
7449 linux_insert_point,
7450 linux_remove_point,
7451 linux_stopped_by_sw_breakpoint,
7452 linux_supports_stopped_by_sw_breakpoint,
7453 linux_stopped_by_hw_breakpoint,
7454 linux_supports_stopped_by_hw_breakpoint,
7455 linux_supports_hardware_single_step,
7456 linux_stopped_by_watchpoint,
7457 linux_stopped_data_address,
7458 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7459 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7460 && defined(PT_TEXT_END_ADDR)
7461 linux_read_offsets,
7462 #else
7463 NULL,
7464 #endif
7465 #ifdef USE_THREAD_DB
7466 thread_db_get_tls_address,
7467 #else
7468 NULL,
7469 #endif
7470 linux_qxfer_spu,
7471 hostio_last_error_from_errno,
7472 linux_qxfer_osdata,
7473 linux_xfer_siginfo,
7474 linux_supports_non_stop,
7475 linux_async,
7476 linux_start_non_stop,
7477 linux_supports_multi_process,
7478 linux_supports_fork_events,
7479 linux_supports_vfork_events,
7480 linux_supports_exec_events,
7481 linux_handle_new_gdb_connection,
7482 #ifdef USE_THREAD_DB
7483 thread_db_handle_monitor_command,
7484 #else
7485 NULL,
7486 #endif
7487 linux_common_core_of_thread,
7488 linux_read_loadmap,
7489 linux_process_qsupported,
7490 linux_supports_tracepoints,
7491 linux_read_pc,
7492 linux_write_pc,
7493 linux_thread_stopped,
7494 NULL,
7495 linux_pause_all,
7496 linux_unpause_all,
7497 linux_stabilize_threads,
7498 linux_install_fast_tracepoint_jump_pad,
7499 linux_emit_ops,
7500 linux_supports_disable_randomization,
7501 linux_get_min_fast_tracepoint_insn_len,
7502 linux_qxfer_libraries_svr4,
7503 linux_supports_agent,
7504 #ifdef HAVE_LINUX_BTRACE
7505 linux_supports_btrace,
7506 linux_enable_btrace,
7507 linux_low_disable_btrace,
7508 linux_low_read_btrace,
7509 linux_low_btrace_conf,
7510 #else
7511 NULL,
7512 NULL,
7513 NULL,
7514 NULL,
7515 NULL,
7516 #endif
7517 linux_supports_range_stepping,
7518 linux_proc_pid_to_exec_file,
7519 linux_mntns_open_cloexec,
7520 linux_mntns_unlink,
7521 linux_mntns_readlink,
7522 linux_breakpoint_kind_from_pc,
7523 linux_sw_breakpoint_from_kind,
7524 linux_proc_tid_get_name,
7525 linux_breakpoint_kind_from_current_state,
7526 linux_supports_software_single_step,
7527 linux_supports_catch_syscall,
7528 linux_get_ipa_tdesc_idx,
7529 };
7530
7531 #ifdef HAVE_LINUX_REGSETS
7532 void
7533 initialize_regsets_info (struct regsets_info *info)
7534 {
7535 for (info->num_regsets = 0;
7536 info->regsets[info->num_regsets].size >= 0;
7537 info->num_regsets++)
7538 ;
7539 }
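/* The regsets array is expected to end with a sentinel entry whose
   size field is negative; that sentinel is what terminates the loop
   above.  */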
7540 #endif
7541
7542 void
7543 initialize_low (void)
7544 {
7545 struct sigaction sigchld_action;
7546
7547 memset (&sigchld_action, 0, sizeof (sigchld_action));
7548 set_target_ops (&linux_target_ops);
7549
7550 linux_ptrace_init_warnings ();
7551
7552 sigchld_action.sa_handler = sigchld_handler;
7553 sigemptyset (&sigchld_action.sa_mask);
7554 sigchld_action.sa_flags = SA_RESTART;
7555 sigaction (SIGCHLD, &sigchld_action, NULL);
7556
7557 initialize_low_arch ();
7558
7559 linux_check_ptrace_features ();
7560 }